2024-12-03 21:16:27,710 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-03 21:16:27,721 main DEBUG Took 0.009570 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-03 21:16:27,722 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-03 21:16:27,722 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-03 21:16:27,723 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-03 21:16:27,725 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,732 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-03 21:16:27,743 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,745 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,746 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,747 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,747 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,747 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,748 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,749 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,749 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,750 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,750 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,750 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,751 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,752 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,753 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 21:16:27,753 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,753 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-03 21:16:27,755 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 21:16:27,756 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-03 21:16:27,758 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-03 21:16:27,759 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-03 21:16:27,760 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-03 21:16:27,761 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-03 21:16:27,768 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-03 21:16:27,771 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-03 21:16:27,773 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-03 21:16:27,773 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-03 21:16:27,773 main DEBUG createAppenders(={Console})
2024-12-03 21:16:27,774 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-03 21:16:27,774 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-03 21:16:27,775 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-03 21:16:27,775 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-03 21:16:27,776 main DEBUG OutputStream closed
2024-12-03 21:16:27,776 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-03 21:16:27,776 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-03 21:16:27,776 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-03 21:16:27,838 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-03 21:16:27,841 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-03 21:16:27,843 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-03 21:16:27,844 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-03 21:16:27,845 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-03 21:16:27,845 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-03 21:16:27,846 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-03 21:16:27,846 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-03 21:16:27,846 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-03 21:16:27,847 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-03 21:16:27,847 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-03 21:16:27,847 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-03 21:16:27,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-03 21:16:27,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-03 21:16:27,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-03 21:16:27,848 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-03 21:16:27,849 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-03 21:16:27,849 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-03 21:16:27,851 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03 21:16:27,852 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-03 21:16:27,852 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-03 21:16:27,853 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-03T21:16:28,144 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb
2024-12-03 21:16:28,147 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-03 21:16:28,147 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03T21:16:28,155 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-03T21:16:28,189 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=472, ProcessCount=11, AvailableMemoryMB=2662
2024-12-03T21:16:28,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T21:16:28,205 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a, deleteOnExit=true
2024-12-03T21:16:28,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-03T21:16:28,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/test.cache.data in system properties and HBase conf
2024-12-03T21:16:28,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T21:16:28,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir in system properties and HBase conf
2024-12-03T21:16:28,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T21:16:28,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T21:16:28,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-03T21:16:28,307 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-03T21:16:28,452 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-03T21:16:28,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-03T21:16:28,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-03T21:16:28,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-03T21:16:28,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T21:16:28,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-03T21:16:28,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-03T21:16:28,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T21:16:28,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T21:16:28,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-03T21:16:28,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/nfs.dump.dir in system properties and HBase conf
2024-12-03T21:16:28,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/java.io.tmpdir in system properties and HBase conf
2024-12-03T21:16:28,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T21:16:28,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-03T21:16:28,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-03T21:16:29,042 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T21:16:29,590 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-03T21:16:29,656 INFO [Time-limited test {}] log.Log(170): Logging initialized @2753ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-03T21:16:29,723 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T21:16:29,779 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T21:16:29,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T21:16:29,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T21:16:29,802 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T21:16:29,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T21:16:29,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,AVAILABLE}
2024-12-03T21:16:29,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T21:16:29,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/java.io.tmpdir/jetty-localhost-45699-hadoop-hdfs-3_4_1-tests_jar-_-any-15189084957039351052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T21:16:29,994 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:45699}
2024-12-03T21:16:29,994 INFO [Time-limited test {}] server.Server(415): Started @3092ms
2024-12-03T21:16:30,021 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T21:16:30,464 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T21:16:30,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T21:16:30,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T21:16:30,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T21:16:30,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T21:16:30,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,AVAILABLE}
2024-12-03T21:16:30,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T21:16:30,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f93babe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/java.io.tmpdir/jetty-localhost-40229-hadoop-hdfs-3_4_1-tests_jar-_-any-2167553782439990455/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T21:16:30,576 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:40229}
2024-12-03T21:16:30,576 INFO [Time-limited test {}] server.Server(415): Started @3674ms
2024-12-03T21:16:30,623 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T21:16:30,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T21:16:30,744 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T21:16:30,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T21:16:30,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T21:16:30,747 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T21:16:30,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,AVAILABLE}
2024-12-03T21:16:30,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T21:16:30,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c963ecd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/java.io.tmpdir/jetty-localhost-35201-hadoop-hdfs-3_4_1-tests_jar-_-any-16575672534651004829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T21:16:30,845 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:35201}
2024-12-03T21:16:30,845 INFO [Time-limited test {}] server.Server(415): Started @3943ms
2024-12-03T21:16:30,848 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T21:16:32,163 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data3/current/BP-1709057562-172.17.0.2-1733260589147/current, will proceed with Du for space computation calculation,
2024-12-03T21:16:32,163 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data1/current/BP-1709057562-172.17.0.2-1733260589147/current, will proceed with Du for space computation calculation,
2024-12-03T21:16:32,163 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data4/current/BP-1709057562-172.17.0.2-1733260589147/current, will proceed with Du for space computation calculation,
2024-12-03T21:16:32,163 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data2/current/BP-1709057562-172.17.0.2-1733260589147/current, will proceed with Du for space computation calculation,
2024-12-03T21:16:32,209 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T21:16:32,213 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T21:16:32,282 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa65979475a35aa46 with lease ID 0x49fced1c4ab731b8: Processing first storage report for DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00 from datanode DatanodeRegistration(127.0.0.1:41099, datanodeUuid=390180c2-d368-47b5-b469-72cd7a5213e0, infoPort=39191, infoSecurePort=0, ipcPort=34003, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147)
2024-12-03T21:16:32,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa65979475a35aa46 with lease ID 0x49fced1c4ab731b8: from storage DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00 node DatanodeRegistration(127.0.0.1:41099, datanodeUuid=390180c2-d368-47b5-b469-72cd7a5213e0, infoPort=39191, infoSecurePort=0, ipcPort=34003, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-03T21:16:32,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb424780054d004d with lease ID 0x49fced1c4ab731b7: Processing first storage report for DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c from datanode DatanodeRegistration(127.0.0.1:39801, datanodeUuid=c24d0376-aebc-43f6-88f4-89c8658bddef, infoPort=43641, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147)
2024-12-03T21:16:32,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb424780054d004d with lease ID 0x49fced1c4ab731b7: from storage DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c node DatanodeRegistration(127.0.0.1:39801, datanodeUuid=c24d0376-aebc-43f6-88f4-89c8658bddef, infoPort=43641, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T21:16:32,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa65979475a35aa46 with lease ID 0x49fced1c4ab731b8: Processing first storage report for DS-d567d093-2314-4988-84f3-1a048d505abe from datanode DatanodeRegistration(127.0.0.1:41099, datanodeUuid=390180c2-d368-47b5-b469-72cd7a5213e0, infoPort=39191, infoSecurePort=0, ipcPort=34003, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147)
2024-12-03T21:16:32,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa65979475a35aa46 with lease ID 0x49fced1c4ab731b8: from storage DS-d567d093-2314-4988-84f3-1a048d505abe node DatanodeRegistration(127.0.0.1:41099, datanodeUuid=390180c2-d368-47b5-b469-72cd7a5213e0, infoPort=39191, infoSecurePort=0, ipcPort=34003, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T21:16:32,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb424780054d004d with lease ID 0x49fced1c4ab731b7: Processing first storage report for DS-a4ab159e-6b69-458c-9d0d-8a5499b6958f from datanode DatanodeRegistration(127.0.0.1:39801, datanodeUuid=c24d0376-aebc-43f6-88f4-89c8658bddef, infoPort=43641, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147)
2024-12-03T21:16:32,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb424780054d004d with lease ID 0x49fced1c4ab731b7: from storage DS-a4ab159e-6b69-458c-9d0d-8a5499b6958f node DatanodeRegistration(127.0.0.1:39801, datanodeUuid=c24d0376-aebc-43f6-88f4-89c8658bddef, infoPort=43641, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=459743402;c=1733260589147), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T21:16:32,302 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb
2024-12-03T21:16:32,442 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/zookeeper_0, clientPort=52448, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T21:16:32,463 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52448
2024-12-03T21:16:32,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:32,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:32,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741825_1001 (size=7)
2024-12-03T21:16:32,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741825_1001 (size=7)
2024-12-03T21:16:33,168 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6 with version=8
2024-12-03T21:16:33,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging
2024-12-03T21:16:33,251 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-03T21:16:33,482 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45
2024-12-03T21:16:33,494 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:33,495 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:33,502 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T21:16:33,502 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:33,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T21:16:33,653 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-03T21:16:33,711 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-03T21:16:33,720 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-03T21:16:33,724 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T21:16:33,747 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 46408 (auto-detected)
2024-12-03T21:16:33,748 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-03T21:16:33,767 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41407
2024-12-03T21:16:33,787 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41407 connecting to ZooKeeper ensemble=127.0.0.1:52448
2024-12-03T21:16:33,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:414070x0, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T21:16:33,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41407-0x1019e5776ec0000 connected
2024-12-03T21:16:34,006 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:34,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:34,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T21:16:34,022 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6, hbase.cluster.distributed=false
2024-12-03T21:16:34,044 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T21:16:34,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41407
2024-12-03T21:16:34,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41407
2024-12-03T21:16:34,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41407
2024-12-03T21:16:34,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41407
2024-12-03T21:16:34,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41407
2024-12-03T21:16:34,136 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45
2024-12-03T21:16:34,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:34,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:34,138 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T21:16:34,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T21:16:34,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T21:16:34,141 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T21:16:34,142 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T21:16:34,143 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39079
2024-12-03T21:16:34,145 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39079 connecting to ZooKeeper ensemble=127.0.0.1:52448
2024-12-03T21:16:34,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:34,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:34,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390790x0, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T21:16:34,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390790x0, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T21:16:34,163 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39079-0x1019e5776ec0001 connected
2024-12-03T21:16:34,166 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T21:16:34,173 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T21:16:34,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T21:16:34,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T21:16:34,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39079
2024-12-03T21:16:34,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39079
2024-12-03T21:16:34,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39079
2024-12-03T21:16:34,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39079
2024-12-03T21:16:34,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39079
2024-12-03T21:16:34,203 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:41407
2024-12-03T21:16:34,204 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,41407,1733260593312
2024-12-03T21:16:34,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T21:16:34,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T21:16:34,220 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/101545f66cbd,41407,1733260593312
2024-12-03T21:16:34,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T21:16:34,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:16:34,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:16:34,252 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T21:16:34,253 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,41407,1733260593312 from backup master directory
2024-12-03T21:16:34,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T21:16:34,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,41407,1733260593312
2024-12-03T21:16:34,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T21:16:34,263 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T21:16:34,263 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,41407,1733260593312
2024-12-03T21:16:34,265 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-03T21:16:34,267 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-03T21:16:34,320 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase.id] with ID: 4a2bf03a-6271-4e6c-ac97-cb51420d8c92
2024-12-03T21:16:34,321 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/.tmp/hbase.id
2024-12-03T21:16:34,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741826_1002 (size=42)
2024-12-03T21:16:34,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741826_1002 (size=42)
2024-12-03T21:16:34,335 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/.tmp/hbase.id]:[hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase.id]
2024-12-03T21:16:34,388 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T21:16:34,394 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-03T21:16:34,414 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms.
2024-12-03T21:16:34,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:16:34,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:16:34,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741827_1003 (size=196)
2024-12-03T21:16:34,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741827_1003 (size=196)
2024-12-03T21:16:34,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T21:16:34,471 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T21:16:34,476 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T21:16:34,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741828_1004 (size=1189)
2024-12-03T21:16:34,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741828_1004 (size=1189)
2024-12-03T21:16:34,521 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store
2024-12-03T21:16:34,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741829_1005 (size=34)
2024-12-03T21:16:34,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741829_1005 (size=34)
2024-12-03T21:16:34,544 INFO [master/101545f66cbd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-03T21:16:34,548 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T21:16:34,549 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T21:16:34,549 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T21:16:34,550 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T21:16:34,551 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T21:16:34,552 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T21:16:34,552 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T21:16:34,553 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260594549Disabling compacts and flushes for region at 1733260594549Disabling writes for close at 1733260594551 (+2 ms)Writing region close event to WAL at 1733260594552 (+1 ms)Closed at 1733260594552 2024-12-03T21:16:34,555 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/.initializing 2024-12-03T21:16:34,556 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/WALs/101545f66cbd,41407,1733260593312 2024-12-03T21:16:34,582 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C41407%2C1733260593312, suffix=, logDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/WALs/101545f66cbd,41407,1733260593312, archiveDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/oldWALs, maxLogs=10 2024-12-03T21:16:34,592 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C41407%2C1733260593312.1733260594587 2024-12-03T21:16:34,617 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/WALs/101545f66cbd,41407,1733260593312/101545f66cbd%2C41407%2C1733260593312.1733260594587 2024-12-03T21:16:34,625 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39191:39191),(127.0.0.1/127.0.0.1:43641:43641)] 2024-12-03T21:16:34,626 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:16:34,626 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:16:34,629 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,630 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:16:34,691 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:34,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:34,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:16:34,697 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:34,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:16:34,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:16:34,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:34,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:16:34,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:16:34,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:34,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:16:34,706 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,710 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,711 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,716 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,717 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,720 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:16:34,724 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:16:34,728 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:16:34,730 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864600, jitterRate=0.09939631819725037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:16:34,736 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260594642Initializing all the Stores at 1733260594644 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260594645 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260594645Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260594646 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260594646Cleaning up temporary data from old regions at 1733260594717 (+71 ms)Region opened successfully at 1733260594736 (+19 ms) 2024-12-03T21:16:34,738 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:16:34,771 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7441cf11, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:16:34,798 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:16:34,808 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:16:34,808 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:16:34,810 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:16:34,812 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T21:16:34,817 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-03T21:16:34,817 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:16:34,848 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:16:34,859 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:16:34,887 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:16:34,891 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:16:34,894 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:16:34,904 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:16:34,907 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:16:34,912 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:16:34,920 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:16:34,924 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-12-03T21:16:34,934 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:16:34,959 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:16:34,967 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:16:34,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:16:34,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:34,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:16:34,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:34,986 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,41407,1733260593312, sessionid=0x1019e5776ec0000, setting cluster-up flag (Was=false) 2024-12-03T21:16:35,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:35,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:35,087 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:16:35,089 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41407,1733260593312 2024-12-03T21:16:35,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:35,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:35,137 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:16:35,139 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41407,1733260593312 2024-12-03T21:16:35,146 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:16:35,192 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(746): ClusterId : 4a2bf03a-6271-4e6c-ac97-cb51420d8c92 2024-12-03T21:16:35,194 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:16:35,205 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:16:35,206 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:16:35,213 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:16:35,213 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:16:35,214 DEBUG [RS:0;101545f66cbd:39079 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583df260, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:16:35,223 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:16:35,230 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:39079 2024-12-03T21:16:35,232 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:16:35,235 INFO [RS:0;101545f66cbd:39079 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:16:35,235 INFO [RS:0;101545f66cbd:39079 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:16:35,235 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T21:16:35,239 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,41407,1733260593312 with port=39079, startcode=1733260594106 2024-12-03T21:16:35,239 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,41407,1733260593312 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:16:35,249 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:16:35,249 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:16:35,250 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:16:35,250 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:16:35,250 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:16:35,251 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,251 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:16:35,251 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,254 DEBUG [RS:0;101545f66cbd:39079 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:16:35,271 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260625271 2024-12-03T21:16:35,274 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:16:35,275 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:16:35,275 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:16:35,276 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:16:35,280 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:16:35,280 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:16:35,281 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:16:35,281 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:16:35,283 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:35,284 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:16:35,283 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-03T21:16:35,288 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:16:35,290 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:16:35,290 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:16:35,293 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:16:35,294 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:16:35,297 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260595296,5,FailOnTimeoutGroup] 2024-12-03T21:16:35,299 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260595298,5,FailOnTimeoutGroup] 2024-12-03T21:16:35,299 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,299 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:16:35,301 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,302 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
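
[Editor's note, not part of the captured log] The HMaster entry above states that reopening regions with a very high storeFileRefCount is disabled and that providing a threshold > 0 for hbase.regions.recovery.store.file.ref.count enables it. A hedged sketch in the same style as the previous one; the key comes straight from the log line, the value 3 and the surrounding method are assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
    // Enables the store-file-ref-count recovery chore; 3 is an arbitrary example threshold.
    public static Configuration withRefCountRecovery() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
    }
}
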
2024-12-03T21:16:35,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:16:35,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:16:35,340 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48077, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:16:35,345 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41407 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,361 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6 2024-12-03T21:16:35,362 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43101 2024-12-03T21:16:35,362 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:16:35,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:16:35,371 DEBUG [RS:0;101545f66cbd:39079 {}] zookeeper.ZKUtil(111): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,371 WARN [RS:0;101545f66cbd:39079 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:16:35,371 INFO [RS:0;101545f66cbd:39079 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:16:35,372 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,373 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,39079,1733260594106] 2024-12-03T21:16:35,398 INFO [RS:0;101545f66cbd:39079 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:16:35,412 INFO [RS:0;101545f66cbd:39079 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:16:35,417 INFO [RS:0;101545f66cbd:39079 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:16:35,417 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T21:16:35,418 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:16:35,424 INFO [RS:0;101545f66cbd:39079 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:16:35,426 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,426 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,426 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,426 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,427 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,427 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,427 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:16:35,427 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:16:35,428 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:16:35,429 DEBUG [RS:0;101545f66cbd:39079 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,430 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39079,1733260594106-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:16:35,448 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:16:35,450 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39079,1733260594106-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,450 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,450 INFO [RS:0;101545f66cbd:39079 {}] regionserver.Replication(171): 101545f66cbd,39079,1733260594106 started 2024-12-03T21:16:35,465 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:35,466 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,39079,1733260594106, RpcServer on 101545f66cbd/172.17.0.2:39079, sessionid=0x1019e5776ec0001 2024-12-03T21:16:35,466 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:16:35,466 DEBUG [RS:0;101545f66cbd:39079 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,467 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39079,1733260594106' 2024-12-03T21:16:35,467 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:16:35,468 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:16:35,469 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:16:35,469 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:16:35,469 DEBUG [RS:0;101545f66cbd:39079 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,469 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39079,1733260594106' 2024-12-03T21:16:35,469 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:16:35,470 DEBUG 
[RS:0;101545f66cbd:39079 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:16:35,471 DEBUG [RS:0;101545f66cbd:39079 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:16:35,471 INFO [RS:0;101545f66cbd:39079 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:16:35,471 INFO [RS:0;101545f66cbd:39079 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:16:35,579 INFO [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C39079%2C1733260594106, suffix=, logDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106, archiveDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs, maxLogs=32 2024-12-03T21:16:35,582 INFO [RS:0;101545f66cbd:39079 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260595581 2024-12-03T21:16:35,593 INFO [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260595581 2024-12-03T21:16:35,594 DEBUG [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:16:35,714 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:16:35,715 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6 2024-12-03T21:16:35,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741833_1009 (size=32) 2024-12-03T21:16:35,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741833_1009 (size=32) 2024-12-03T21:16:35,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:16:35,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:16:35,750 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:16:35,750 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:35,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:35,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:16:35,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:16:35,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:35,757 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:35,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:16:35,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:16:35,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:35,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:35,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:16:35,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:16:35,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:35,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:35,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:16:35,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740 2024-12-03T21:16:35,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740 2024-12-03T21:16:35,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:16:35,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:16:35,772 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:16:35,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:16:35,779 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:16:35,780 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698910, jitterRate=-0.11129026114940643}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:16:35,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260595732Initializing all the Stores at 1733260595735 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260595735Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260595744 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260595744Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260595744Cleaning up temporary data from old regions at 1733260595771 (+27 ms)Region opened successfully at 1733260595782 (+11 ms) 2024-12-03T21:16:35,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:16:35,782 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:16:35,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:16:35,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:16:35,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:16:35,784 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:16:35,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260595782Disabling compacts and flushes for region at 1733260595782Disabling writes for close at 1733260595782Writing region close event to WAL at 1733260595783 (+1 ms)Closed at 1733260595784 (+1 ms) 2024-12-03T21:16:35,787 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:16:35,787 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:16:35,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:16:35,800 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:16:35,803 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:16:35,956 DEBUG [101545f66cbd:41407 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:16:35,971 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,39079,1733260594106 2024-12-03T21:16:35,978 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,39079,1733260594106, state=OPENING 2024-12-03T21:16:35,992 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:16:36,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:36,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:16:36,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:16:36,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-12-03T21:16:36,005 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:16:36,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,39079,1733260594106}] 2024-12-03T21:16:36,188 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:16:36,191 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51167, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:16:36,204 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:16:36,204 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:16:36,208 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C39079%2C1733260594106.meta, suffix=.meta, logDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106, archiveDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs, maxLogs=32 2024-12-03T21:16:36,210 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.meta.1733260596209.meta 2024-12-03T21:16:36,217 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.meta.1733260596209.meta 2024-12-03T21:16:36,218 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:16:36,219 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:16:36,221 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:16:36,223 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:16:36,227 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta 
successfully. 2024-12-03T21:16:36,231 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:16:36,231 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:16:36,232 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:16:36,232 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:16:36,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:16:36,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:16:36,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:36,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:36,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:16:36,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:16:36,239 DEBUG [StoreOpener-1588230740-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:36,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:36,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:16:36,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:16:36,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:36,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:16:36,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:16:36,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:16:36,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:36,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
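[Editor's note] The CompactionConfiguration entries above repeat, for each column family of region 1588230740, the store-level compaction settings the region was opened with (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, 7-day major period with 0.5 jitter). As a minimal Java sketch, and assuming the standard hbase-site.xml keys (the class name is purely illustrative, not part of the test), the same numbers could be pinned explicitly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      // Returns a Configuration whose compaction settings mirror the values
      // reported by the CompactionConfiguration log lines above.
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize, 128 MB
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
        return conf;
      }
    }
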
2024-12-03T21:16:36,245 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:16:36,247 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740 2024-12-03T21:16:36,250 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740 2024-12-03T21:16:36,253 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:16:36,253 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:16:36,254 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:16:36,258 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:16:36,260 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856075, jitterRate=0.08855670690536499}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:16:36,260 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:16:36,261 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260596232Writing region info on filesystem at 1733260596232Initializing all the Stores at 1733260596234 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260596234Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260596234Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260596234Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260596234Cleaning up temporary data from old regions at 1733260596253 (+19 ms)Running coprocessor post-open hooks at 1733260596260 (+7 ms)Region opened successfully at 1733260596261 (+1 ms) 2024-12-03T21:16:36,267 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260596178 2024-12-03T21:16:36,277 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:16:36,278 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:16:36,279 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,39079,1733260594106 2024-12-03T21:16:36,281 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,39079,1733260594106, state=OPEN 2024-12-03T21:16:36,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:16:36,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:16:36,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:16:36,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:16:36,340 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,39079,1733260594106 2024-12-03T21:16:36,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:16:36,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,39079,1733260594106 in 333 msec 2024-12-03T21:16:36,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:16:36,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 561 msec 2024-12-03T21:16:36,360 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:16:36,361 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:16:36,379 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:16:36,380 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,39079,1733260594106, seqNum=-1] 2024-12-03T21:16:36,397 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:16:36,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:16:36,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2450 sec 2024-12-03T21:16:36,422 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260596422, completionTime=-1 2024-12-03T21:16:36,425 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:16:36,425 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:16:36,446 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:16:36,446 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260656446 2024-12-03T21:16:36,446 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260716446 2024-12-03T21:16:36,446 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 21 msec 2024-12-03T21:16:36,449 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:36,449 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:36,449 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:36,451 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:41407, period=300000, unit=MILLISECONDS is enabled. 
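[Editor's note] The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]" pair above is the client-side lookup of where hbase:meta is currently served. A minimal sketch of the equivalent lookup through the public client API follows; the class name and the empty-row probe are illustrative assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // would point at the cluster's ZooKeeper quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Ask which server hosts hbase:meta, bypassing the client cache,
          // which is the same answer the "fetched meta region location" entries above record.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println("hbase:meta is served by " + loc.getServerName());
        }
      }
    }
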
2024-12-03T21:16:36,452 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:36,452 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:16:36,459 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:16:36,478 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.215sec 2024-12-03T21:16:36,479 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:16:36,480 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:16:36,481 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:16:36,482 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:16:36,482 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:16:36,483 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:16:36,484 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:16:36,493 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:16:36,494 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:16:36,495 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41407,1733260593312-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:16:36,503 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4c629c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:16:36,505 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T21:16:36,505 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T21:16:36,507 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,41407,-1 for getting cluster id 2024-12-03T21:16:36,510 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:16:36,517 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4a2bf03a-6271-4e6c-ac97-cb51420d8c92' 2024-12-03T21:16:36,519 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:16:36,519 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4a2bf03a-6271-4e6c-ac97-cb51420d8c92" 2024-12-03T21:16:36,522 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@120d12d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:16:36,522 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,41407,-1] 2024-12-03T21:16:36,524 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:16:36,526 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:16:36,527 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:16:36,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5286c427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:16:36,531 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:16:36,538 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,39079,1733260594106, seqNum=-1] 2024-12-03T21:16:36,539 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:16:36,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:16:36,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=101545f66cbd,41407,1733260593312 2024-12-03T21:16:36,562 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:16:36,570 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:16:36,574 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T21:16:36,580 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 101545f66cbd,41407,1733260593312 2024-12-03T21:16:36,582 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2bfb5e00 2024-12-03T21:16:36,584 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T21:16:36,587 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46774, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T21:16:36,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T21:16:36,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
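[Editor's note] The "Client=null/null set balanceSwitch=false" entry above records a client switching the load balancer off right after the minicluster comes up, so region placement stays fixed for the test. A hedged sketch of that call through the Admin API (the configuration wiring to the minicluster is assumed, not shown in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Turn the balancer off; the second argument asks the master to wait for
          // any in-flight balancing to finish before returning.
          boolean previouslyOn = admin.balancerSwitch(false, true);
          System.out.println("Balancer was previously " + (previouslyOn ? "on" : "off"));
        }
      }
    }
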
2024-12-03T21:16:36,592 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:16:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-03T21:16:36,603 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T21:16:36,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-03T21:16:36,605 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:36,607 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T21:16:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:16:36,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741835_1011 (size=389) 2024-12-03T21:16:36,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741835_1011 (size=389) 2024-12-03T21:16:36,668 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 235dcab78bdc25aa84b1c078e8a6d6ce, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6 2024-12-03T21:16:36,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741836_1012 (size=72) 2024-12-03T21:16:36,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741836_1012 (size=72) 2024-12-03T21:16:36,678 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:16:36,679 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 235dcab78bdc25aa84b1c078e8a6d6ce, disabling compactions & flushes 2024-12-03T21:16:36,679 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:36,679 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:36,679 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. after waiting 0 ms 2024-12-03T21:16:36,679 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:36,679 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:36,679 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 235dcab78bdc25aa84b1c078e8a6d6ce: Waiting for close lock at 1733260596678Disabling compacts and flushes for region at 1733260596678Disabling writes for close at 1733260596679 (+1 ms)Writing region close event to WAL at 1733260596679Closed at 1733260596679 2024-12-03T21:16:36,681 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T21:16:36,686 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733260596681"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260596681"}]},"ts":"1733260596681"} 2024-12-03T21:16:36,691 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T21:16:36,693 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T21:16:36,695 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260596693"}]},"ts":"1733260596693"} 2024-12-03T21:16:36,699 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-03T21:16:36,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=235dcab78bdc25aa84b1c078e8a6d6ce, ASSIGN}] 2024-12-03T21:16:36,704 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=235dcab78bdc25aa84b1c078e8a6d6ce, ASSIGN 2024-12-03T21:16:36,705 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=235dcab78bdc25aa84b1c078e8a6d6ce, ASSIGN; state=OFFLINE, location=101545f66cbd,39079,1733260594106; forceNewPlan=false, retain=false 2024-12-03T21:16:36,857 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=235dcab78bdc25aa84b1c078e8a6d6ce, regionState=OPENING, regionLocation=101545f66cbd,39079,1733260594106 2024-12-03T21:16:36,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=235dcab78bdc25aa84b1c078e8a6d6ce, ASSIGN because future has completed 2024-12-03T21:16:36,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 235dcab78bdc25aa84b1c078e8a6d6ce, server=101545f66cbd,39079,1733260594106}] 2024-12-03T21:16:37,032 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 
2024-12-03T21:16:37,033 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 235dcab78bdc25aa84b1c078e8a6d6ce, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:16:37,033 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,033 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:16:37,034 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,034 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,038 INFO [StoreOpener-235dcab78bdc25aa84b1c078e8a6d6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,040 INFO [StoreOpener-235dcab78bdc25aa84b1c078e8a6d6ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 235dcab78bdc25aa84b1c078e8a6d6ce columnFamilyName info 2024-12-03T21:16:37,041 DEBUG [StoreOpener-235dcab78bdc25aa84b1c078e8a6d6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:16:37,042 INFO [StoreOpener-235dcab78bdc25aa84b1c078e8a6d6ce-1 {}] regionserver.HStore(327): Store=235dcab78bdc25aa84b1c078e8a6d6ce/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:16:37,042 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,045 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,046 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,046 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,047 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,049 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,052 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:16:37,053 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 235dcab78bdc25aa84b1c078e8a6d6ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763805, jitterRate=-0.02877199649810791}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:16:37,053 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:37,054 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 235dcab78bdc25aa84b1c078e8a6d6ce: Running coprocessor pre-open hook at 1733260597034Writing region info on filesystem at 1733260597034Initializing all the Stores at 1733260597037 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260597037Cleaning up temporary data from old regions at 1733260597047 (+10 ms)Running coprocessor post-open hooks at 1733260597053 (+6 ms)Region opened successfully at 1733260597054 (+1 ms) 2024-12-03T21:16:37,056 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce., pid=6, masterSystemTime=1733260597022 2024-12-03T21:16:37,060 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:37,060 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:16:37,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=235dcab78bdc25aa84b1c078e8a6d6ce, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,39079,1733260594106 2024-12-03T21:16:37,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 235dcab78bdc25aa84b1c078e8a6d6ce, server=101545f66cbd,39079,1733260594106 because future has completed 2024-12-03T21:16:37,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T21:16:37,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 235dcab78bdc25aa84b1c078e8a6d6ce, server=101545f66cbd,39079,1733260594106 in 202 msec 2024-12-03T21:16:37,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T21:16:37,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=235dcab78bdc25aa84b1c078e8a6d6ce, ASSIGN in 373 msec 2024-12-03T21:16:37,082 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T21:16:37,082 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260597082"}]},"ts":"1733260597082"} 2024-12-03T21:16:37,087 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-03T21:16:37,089 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T21:16:37,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 494 msec 2024-12-03T21:16:41,747 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T21:16:41,828 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T21:16:41,830 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-03T21:16:43,708 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T21:16:43,709 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T21:16:43,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T21:16:43,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T21:16:43,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:16:43,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T21:16:43,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T21:16:43,715 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T21:16:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41407 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:16:46,647 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-03T21:16:46,651 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-03T21:16:46,658 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-03T21:16:46,659 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 
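[Editor's note] At this point the CREATE operation for default:TestLogRolling-testSlowSyncLogRolling has completed. A minimal sketch of how such a table could be created through the Admin API, using the deliberately tiny MEMSTORE_FLUSHSIZE (8192) and MAX_FILESIZE (786432) values that TableDescriptorChecker warned about earlier; this is an illustrative reconstruction under those assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              // Deliberately small limits so the test triggers flushes and rolls quickly;
              // these are the values the TableDescriptorChecker WARNs about above.
              .setMemStoreFlushSize(8192)   // MEMSTORE_FLUSHSIZE
              .setMaxFileSize(786432)       // MAX_FILESIZE
              .build();
          admin.createTable(td);
        }
      }
    }
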
2024-12-03T21:16:46,660 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260606660 2024-12-03T21:16:46,670 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:16:46,670 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:16:46,670 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:16:46,670 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:16:46,670 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:16:46,671 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260595581 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260606660 2024-12-03T21:16:46,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741832_1008 (size=451) 2024-12-03T21:16:46,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741832_1008 (size=451) 2024-12-03T21:16:46,680 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:16:46,680 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260595581 is not closed yet, will try archiving it next time 2024-12-03T21:16:46,682 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260595581 to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs/101545f66cbd%2C39079%2C1733260594106.1733260595581 2024-12-03T21:16:46,691 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce., hostname=101545f66cbd,39079,1733260594106, seqNum=2] 2024-12-03T21:16:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39079 {}] regionserver.HRegion(8855): Flush requested on 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:16:58,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 235dcab78bdc25aa84b1c078e8a6d6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:16:58,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/d052b60c9f8b47598ba7191e09838be8 is 1080, key is row0001/info:/1733260606695/Put/seqid=0 2024-12-03T21:16:58,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741838_1014 (size=12509) 2024-12-03T21:16:58,807 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741838_1014 (size=12509) 2024-12-03T21:16:58,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/d052b60c9f8b47598ba7191e09838be8 2024-12-03T21:16:58,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/d052b60c9f8b47598ba7191e09838be8 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8 2024-12-03T21:16:58,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T21:16:58,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 235dcab78bdc25aa84b1c078e8a6d6ce in 134ms, sequenceid=11, compaction requested=false 2024-12-03T21:16:58,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 235dcab78bdc25aa84b1c078e8a6d6ce: 2024-12-03T21:17:02,307 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
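The flush above writes 7 cells (~7.36 KB, keys row0001..row0007 in the info family) into a single ~12.2 K store file. A hedged sketch of the kind of write loop that produces it; the ~1 KB value size, the empty qualifier and the row-key format are inferred from the "key is row0001/info:" and "Len of the biggest cell ... is 1080" entries, and the real test internals may differ:

  import java.util.Arrays;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class WriteBatch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      byte[] value = new byte[1024];   // ~1 KB payload; the full KeyValue with key overhead is ~1080 bytes
      Arrays.fill(value, (byte) 'x');
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
        for (int i = 1; i <= 7; i++) {   // row0001..row0007 -> one ~7.36 KB memstore flush
          Put p = new Put(Bytes.toBytes(String.format("row%04d", i)));
          p.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);   // family "info", empty qualifier
          table.put(p);
        }
      }
    }
  }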
2024-12-03T21:17:06,753 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260626752 2024-12-03T21:17:06,965 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:06,965 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:06,965 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:06,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:06,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:06,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:06,966 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260606660 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260626752 2024-12-03T21:17:06,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:17:06,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260606660 is not closed yet, will try archiving it next time 2024-12-03T21:17:06,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741837_1013 (size=12399) 2024-12-03T21:17:06,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741837_1013 (size=12399) 2024-12-03T21:17:07,171 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:09,376 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:11,580 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:13,785 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39079 {}] regionserver.HRegion(8855): Flush requested on 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:17:13,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 235dcab78bdc25aa84b1c078e8a6d6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:17:13,988 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:13,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/334e2c3c536a48ec81fa9440a75e4a5b is 1080, key is row0008/info:/1733260620736/Put/seqid=0 2024-12-03T21:17:14,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741840_1016 (size=12509) 2024-12-03T21:17:14,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741840_1016 (size=12509) 2024-12-03T21:17:14,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/334e2c3c536a48ec81fa9440a75e4a5b 2024-12-03T21:17:14,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/334e2c3c536a48ec81fa9440a75e4a5b as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b 2024-12-03T21:17:14,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b, entries=7, sequenceid=21, filesize=12.2 K 2024-12-03T21:17:14,230 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:14,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 235dcab78bdc25aa84b1c078e8a6d6ce in 
445ms, sequenceid=21, compaction requested=false 2024-12-03T21:17:14,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 235dcab78bdc25aa84b1c078e8a6d6ce: 2024-12-03T21:17:14,231 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-03T21:17:14,231 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:14,233 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8 because midkey is the same as first or last row 2024-12-03T21:17:15,989 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:16,499 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T21:17:16,499 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T21:17:18,194 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:18,199 WARN [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:18,200 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C39079%2C1733260594106:(num 1733260626752) roll requested 2024-12-03T21:17:18,201 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260638201 2024-12-03T21:17:18,410 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:18,410 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:18,410 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:18,410 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:18,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:18,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
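From this point the injected latency makes every WAL sync cost just over 200 ms, and once the number of slow syncs exceeds the configured count (the WARN entry reports count=8 against threshold=5) the WAL requests a roll. A self-contained sketch of that count-based decision; this is an illustration only, not the AbstractFSWAL implementation, and all names and default values here are invented:

  /** Illustrative sketch of count-based slow-sync roll detection (not HBase source). */
  class SlowSyncRollTracker {
    private final long slowSyncMillis;   // a sync slower than this is counted as "slow"
    private final int rollThreshold;     // request a roll after this many slow syncs
    private int slowSyncCount;

    SlowSyncRollTracker(long slowSyncMillis, int rollThreshold) {
      this.slowSyncMillis = slowSyncMillis;
      this.rollThreshold = rollThreshold;
    }

    /** Returns true when a log roll should be requested, as in the WARN entry above. */
    boolean onSyncCompleted(long syncCostMillis) {
      if (syncCostMillis >= slowSyncMillis) {
        slowSyncCount++;
      }
      return slowSyncCount >= rollThreshold;
    }

    /** The counter starts over once the WAL has actually been rolled. */
    void onLogRolled() {
      slowSyncCount = 0;
    }

    public static void main(String[] args) {
      SlowSyncRollTracker t = new SlowSyncRollTracker(100, 5);   // threshold=5 mirrors the WARN entry
      for (int i = 1; i <= 8; i++) {
        System.out.println("sync " + i + " (201 ms) -> roll requested: " + t.onSyncCompleted(201));
      }
    }
  }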
2024-12-03T21:17:18,411 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260626752 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260638201 2024-12-03T21:17:18,412 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:17:18,412 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260626752 is not closed yet, will try archiving it next time 2024-12-03T21:17:18,412 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260606660 to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs/101545f66cbd%2C39079%2C1733260594106.1733260606660 2024-12-03T21:17:18,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741839_1015 (size=7739) 2024-12-03T21:17:18,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741839_1015 (size=7739) 2024-12-03T21:17:20,399 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:22,034 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 235dcab78bdc25aa84b1c078e8a6d6ce, had cached 0 bytes from a total of 25018 2024-12-03T21:17:22,604 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:24,808 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:27,012 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:29,014 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T21:17:29,014 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260649014 2024-12-03T21:17:32,308 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T21:17:34,027 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:34,031 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK]] 2024-12-03T21:17:34,031 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C39079%2C1733260594106:(num 1733260649014) roll requested 2024-12-03T21:17:34,031 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:34,031 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:34,032 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:34,032 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:34,032 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:34,032 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260638201 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260649014 2024-12-03T21:17:34,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39191:39191),(127.0.0.1/127.0.0.1:43641:43641)] 2024-12-03T21:17:34,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260638201 is not closed yet, will try archiving it next time 2024-12-03T21:17:34,033 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260654033 2024-12-03T21:17:34,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741841_1017 (size=4753) 2024-12-03T21:17:34,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741841_1017 (size=4753) 2024-12-03T21:17:39,037 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:39,037 WARN [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39079 {}] regionserver.HRegion(8855): Flush requested on 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:17:39,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 235dcab78bdc25aa84b1c078e8a6d6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:17:39,043 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:39,043 WARN [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:41,038 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T21:17:44,039 INFO [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:44,040 WARN [FSHLog-0-hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6-prefix:101545f66cbd,39079,1733260594106 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41099,DS-3c776736-a8f3-4474-b8e2-e005eb4ffa00,DISK], DatanodeInfoWithStorage[127.0.0.1:39801,DS-6db3b7e3-5ae9-460d-a87e-4b0c81e9d00c,DISK]] 2024-12-03T21:17:44,040 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,040 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,040 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,040 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,040 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,041 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260649014 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260654033 2024-12-03T21:17:44,042 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:17:44,042 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260649014 is not closed yet, will try archiving it next time 2024-12-03T21:17:44,042 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C39079%2C1733260594106:(num 1733260664042) roll requested 2024-12-03T21:17:44,042 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260664042 2024-12-03T21:17:44,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741842_1018 (size=1569) 2024-12-03T21:17:44,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741842_1018 (size=1569) 2024-12-03T21:17:44,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/552cf4c69b4045aab34c2558424518d3 is 1080, key is row0015/info:/1733260635787/Put/seqid=0 2024-12-03T21:17:44,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,054 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260654033 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260664042 2024-12-03T21:17:44,055 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:17:44,055 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260654033 is not closed yet, will try archiving it next time 2024-12-03T21:17:44,056 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39079%2C1733260594106.1733260664055 2024-12-03T21:17:44,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741843_1019 (size=93) 2024-12-03T21:17:44,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is 
added to blk_1073741843_1019 (size=93) 2024-12-03T21:17:44,061 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260654033 to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs/101545f66cbd%2C39079%2C1733260594106.1733260654033 2024-12-03T21:17:44,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741845_1021 (size=12509) 2024-12-03T21:17:44,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741845_1021 (size=12509) 2024-12-03T21:17:44,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/552cf4c69b4045aab34c2558424518d3 2024-12-03T21:17:44,071 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,071 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,071 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,071 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,075 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:17:44,076 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260664042 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/WALs/101545f66cbd,39079,1733260594106/101545f66cbd%2C39079%2C1733260594106.1733260664055 2024-12-03T21:17:44,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741844_1020 (size=1258) 2024-12-03T21:17:44,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741844_1020 (size=1258) 2024-12-03T21:17:44,080 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39191:39191)] 2024-12-03T21:17:44,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/552cf4c69b4045aab34c2558424518d3 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3 2024-12-03T21:17:44,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3, entries=7, sequenceid=31, filesize=12.2 K 2024-12-03T21:17:44,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of 
dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 235dcab78bdc25aa84b1c078e8a6d6ce in 5058ms, sequenceid=31, compaction requested=true 2024-12-03T21:17:44,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 235dcab78bdc25aa84b1c078e8a6d6ce: 2024-12-03T21:17:44,096 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-03T21:17:44,096 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:44,096 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8 because midkey is the same as first or last row 2024-12-03T21:17:44,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 235dcab78bdc25aa84b1c078e8a6d6ce:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:17:44,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:17:44,100 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:17:44,104 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:17:44,106 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HStore(1541): 235dcab78bdc25aa84b1c078e8a6d6ce/info is initiating minor compaction (all files) 2024-12-03T21:17:44,106 INFO [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 235dcab78bdc25aa84b1c078e8a6d6ce/info in TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 
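With three ~12.2 K store files on disk, the flusher queues a compaction and the exploring policy selects all 3 files (37527 bytes in total) for a minor compaction. A simplified illustration of the ratio-based eligibility idea behind such a selection; the real logic lives in ExploringCompactionPolicy and is considerably more involved:

  import java.util.List;

  /** Simplified illustration of ratio-based compaction eligibility (not the actual HBase policy code). */
  class RatioSelection {
    /** A candidate set is eligible if no file is larger than ratio * (sum of the other files). */
    static boolean eligible(List<Long> fileSizes, double ratio) {
      long total = fileSizes.stream().mapToLong(Long::longValue).sum();
      return fileSizes.stream().allMatch(size -> size <= (total - size) * ratio);
    }

    public static void main(String[] args) {
      // Three ~12.2 K store files, as selected in the log (12509 + 12509 + 12509 = 37527 bytes).
      System.out.println(eligible(List.of(12509L, 12509L, 12509L), 1.2));     // true: similar sizes
      System.out.println(eligible(List.of(12509L, 12509L, 1_000_000L), 1.2)); // false: one file dominates
    }
  }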
2024-12-03T21:17:44,107 INFO [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3] into tmpdir=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp, totalSize=36.6 K 2024-12-03T21:17:44,108 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] compactions.Compactor(225): Compacting d052b60c9f8b47598ba7191e09838be8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733260606695 2024-12-03T21:17:44,109 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 334e2c3c536a48ec81fa9440a75e4a5b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733260620736 2024-12-03T21:17:44,110 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] compactions.Compactor(225): Compacting 552cf4c69b4045aab34c2558424518d3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733260635787 2024-12-03T21:17:44,139 INFO [RS:0;101545f66cbd:39079-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 235dcab78bdc25aa84b1c078e8a6d6ce#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:17:44,140 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/ab2500463d8b4cd7ac2adae9d36d141f is 1080, key is row0001/info:/1733260606695/Put/seqid=0 2024-12-03T21:17:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741847_1023 (size=27710) 2024-12-03T21:17:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741847_1023 (size=27710) 2024-12-03T21:17:44,158 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/ab2500463d8b4cd7ac2adae9d36d141f as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/ab2500463d8b4cd7ac2adae9d36d141f 2024-12-03T21:17:44,176 INFO [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 235dcab78bdc25aa84b1c078e8a6d6ce/info of 235dcab78bdc25aa84b1c078e8a6d6ce into ab2500463d8b4cd7ac2adae9d36d141f(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:17:44,176 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 235dcab78bdc25aa84b1c078e8a6d6ce: 2024-12-03T21:17:44,179 INFO [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce., storeName=235dcab78bdc25aa84b1c078e8a6d6ce/info, priority=13, startTime=1733260664097; duration=0sec 2024-12-03T21:17:44,179 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T21:17:44,179 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:44,179 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/ab2500463d8b4cd7ac2adae9d36d141f because midkey is the same as first or last row 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/ab2500463d8b4cd7ac2adae9d36d141f because midkey is the same as first or last row 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/ab2500463d8b4cd7ac2adae9d36d141f because midkey is the same as first or last row 2024-12-03T21:17:44,180 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:17:44,181 DEBUG [RS:0;101545f66cbd:39079-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 235dcab78bdc25aa84b1c078e8a6d6ce:info 2024-12-03T21:17:56,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39079 {}] regionserver.HRegion(8855): Flush requested on 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:17:56,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 235dcab78bdc25aa84b1c078e8a6d6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:17:56,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/78183dd21aff4b9dba37ea901253184b is 1080, key is row0022/info:/1733260664057/Put/seqid=0 2024-12-03T21:17:56,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741848_1024 (size=12509) 2024-12-03T21:17:56,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741848_1024 (size=12509) 2024-12-03T21:17:56,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/78183dd21aff4b9dba37ea901253184b 2024-12-03T21:17:56,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/78183dd21aff4b9dba37ea901253184b as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/78183dd21aff4b9dba37ea901253184b 2024-12-03T21:17:56,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/78183dd21aff4b9dba37ea901253184b, entries=7, sequenceid=42, filesize=12.2 K 2024-12-03T21:17:56,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 235dcab78bdc25aa84b1c078e8a6d6ce in 33ms, sequenceid=42, compaction requested=false 2024-12-03T21:17:56,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 235dcab78bdc25aa84b1c078e8a6d6ce: 2024-12-03T21:17:56,122 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-03T21:17:56,122 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:17:56,122 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/ab2500463d8b4cd7ac2adae9d36d141f because midkey is the same as first or last row 2024-12-03T21:18:02,308 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T21:18:04,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:18:04,108 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
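Each flush now trips the size-based split check (the test runs with a deliberately small sizeToCheck of 16.0 K), yet the region is never split because the chosen store file's midkey equals its first or last row. A minimal sketch of that two-step decision; the real checks are in ConstantSizeRegionSplitPolicy / IncreasingToUpperBoundRegionSplitPolicy and StoreUtils, and the sizes and keys below are illustrative:

  import java.util.Arrays;

  /** Illustrative sketch of the "big enough but unsplittable" decision seen in the log. */
  class SplitCheck {
    /** Size check: the summed store size must exceed the configured threshold. */
    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
      return sumStoreSizeBytes > sizeToCheckBytes;
    }

    /** A store file yields no usable split point if its midkey equals its first or last key. */
    static boolean canSplitAt(byte[] midKey, byte[] firstKey, byte[] lastKey) {
      return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
    }

    public static void main(String[] args) {
      System.out.println(shouldSplit(40243, 16384));        // ~39.3 K vs 16.0 K -> "should split"
      byte[] first = "row0001".getBytes();
      byte[] last = "row0029".getBytes();
      System.out.println(canSplitAt(first, first, last));   // midkey == first row -> cannot split
    }
  }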
2024-12-03T21:18:04,108 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:04,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:04,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:04,113 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
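The call stack above shows the shutdown being driven from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster. A bare-bones JUnit 4 lifecycle sketch of that pattern; the class name is made up, and the real test also sets WAL, flush and split configuration before starting the cluster:

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.junit.After;
  import org.junit.Before;

  public class MiniClusterLifecycle {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
      util.startMiniCluster();      // brings up ZooKeeper, a mini-DFS, master and region server
    }

    @After
    public void tearDown() throws Exception {
      util.shutdownMiniCluster();   // produces the "Shutting down minicluster" sequence above
    }
  }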
2024-12-03T21:18:04,114 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:18:04,114 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2125010042, stopped=false 2024-12-03T21:18:04,114 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,41407,1733260593312 2024-12-03T21:18:04,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:04,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:04,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:04,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:04,123 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:04,123 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:18:04,124 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:04,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:04,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:04,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:04,124 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,39079,1733260594106' ***** 2024-12-03T21:18:04,124 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:18:04,124 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:18:04,125 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(3091): Received CLOSE for 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,39079,1733260594106 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:04,125 INFO [RS:0;101545f66cbd:39079 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:39079. 
2024-12-03T21:18:04,126 DEBUG [RS:0;101545f66cbd:39079 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:04,126 DEBUG [RS:0;101545f66cbd:39079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:04,126 INFO [RS:0;101545f66cbd:39079 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:18:04,126 INFO [RS:0;101545f66cbd:39079 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 235dcab78bdc25aa84b1c078e8a6d6ce, disabling compactions & flushes 2024-12-03T21:18:04,126 INFO [RS:0;101545f66cbd:39079 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:18:04,126 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:18:04,126 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. after waiting 0 ms 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 
2024-12-03T21:18:04,126 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 235dcab78bdc25aa84b1c078e8a6d6ce 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-03T21:18:04,126 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:04,126 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 235dcab78bdc25aa84b1c078e8a6d6ce=TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.} 2024-12-03T21:18:04,126 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:04,126 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:04,127 DEBUG [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 235dcab78bdc25aa84b1c078e8a6d6ce 2024-12-03T21:18:04,127 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-03T21:18:04,134 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/aaf3b07461c145dd96c33ef2f674bd05 is 1080, key is row0029/info:/1733260678092/Put/seqid=0 2024-12-03T21:18:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741849_1025 (size=8193) 2024-12-03T21:18:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741849_1025 (size=8193) 2024-12-03T21:18:04,143 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/aaf3b07461c145dd96c33ef2f674bd05 2024-12-03T21:18:04,150 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/info/b016f98f5858481bb7945742c523f727 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce./info:regioninfo/1733260597061/Put/seqid=0 2024-12-03T21:18:04,152 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/.tmp/info/aaf3b07461c145dd96c33ef2f674bd05 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/aaf3b07461c145dd96c33ef2f674bd05 2024-12-03T21:18:04,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741850_1026 (size=7016) 2024-12-03T21:18:04,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741850_1026 (size=7016) 2024-12-03T21:18:04,157 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/info/b016f98f5858481bb7945742c523f727 2024-12-03T21:18:04,161 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/aaf3b07461c145dd96c33ef2f674bd05, entries=3, sequenceid=48, filesize=8.0 K 2024-12-03T21:18:04,163 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 235dcab78bdc25aa84b1c078e8a6d6ce in 37ms, sequenceid=48, compaction requested=true 2024-12-03T21:18:04,164 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3] to archive 2024-12-03T21:18:04,167 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T21:18:04,170 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8 to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/d052b60c9f8b47598ba7191e09838be8 2024-12-03T21:18:04,172 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/334e2c3c536a48ec81fa9440a75e4a5b 2024-12-03T21:18:04,174 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3 to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/archive/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/info/552cf4c69b4045aab34c2558424518d3 2024-12-03T21:18:04,181 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/ns/6a700e5eb4a8462db2c7ffe66c767616 is 43, key is default/ns:d/1733260596403/Put/seqid=0 2024-12-03T21:18:04,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741851_1027 (size=5153) 2024-12-03T21:18:04,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741851_1027 (size=5153) 2024-12-03T21:18:04,188 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/ns/6a700e5eb4a8462db2c7ffe66c767616 2024-12-03T21:18:04,185 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=101545f66cbd:41407 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-03T21:18:04,191 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d052b60c9f8b47598ba7191e09838be8=12509, 334e2c3c536a48ec81fa9440a75e4a5b=12509, 552cf4c69b4045aab34c2558424518d3=12509] 2024-12-03T21:18:04,197 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/default/TestLogRolling-testSlowSyncLogRolling/235dcab78bdc25aa84b1c078e8a6d6ce/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-03T21:18:04,199 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 2024-12-03T21:18:04,199 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 235dcab78bdc25aa84b1c078e8a6d6ce: Waiting for close lock at 1733260684125Running coprocessor pre-close hooks at 1733260684126 (+1 ms)Disabling compacts and flushes for region at 1733260684126Disabling writes for close at 1733260684126Obtaining lock to block concurrent updates at 1733260684126Preparing flush snapshotting stores in 235dcab78bdc25aa84b1c078e8a6d6ce at 1733260684126Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733260684126Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. at 1733260684128 (+2 ms)Flushing 235dcab78bdc25aa84b1c078e8a6d6ce/info: creating writer at 1733260684128Flushing 235dcab78bdc25aa84b1c078e8a6d6ce/info: appending metadata at 1733260684133 (+5 ms)Flushing 235dcab78bdc25aa84b1c078e8a6d6ce/info: closing flushed file at 1733260684133Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dd4b55: reopening flushed file at 1733260684151 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 235dcab78bdc25aa84b1c078e8a6d6ce in 37ms, sequenceid=48, compaction requested=true at 1733260684163 (+12 ms)Writing region close event to WAL at 1733260684192 (+29 ms)Running coprocessor post-close hooks at 1733260684197 (+5 ms)Closed at 1733260684199 (+2 ms) 2024-12-03T21:18:04,200 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733260596588.235dcab78bdc25aa84b1c078e8a6d6ce. 
2024-12-03T21:18:04,216 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/table/6ed3be6ffcb74b99adace8ff670b2768 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733260597082/Put/seqid=0 2024-12-03T21:18:04,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741852_1028 (size=5396) 2024-12-03T21:18:04,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741852_1028 (size=5396) 2024-12-03T21:18:04,235 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/table/6ed3be6ffcb74b99adace8ff670b2768 2024-12-03T21:18:04,245 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/info/b016f98f5858481bb7945742c523f727 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/info/b016f98f5858481bb7945742c523f727 2024-12-03T21:18:04,256 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/info/b016f98f5858481bb7945742c523f727, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T21:18:04,257 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/ns/6a700e5eb4a8462db2c7ffe66c767616 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/ns/6a700e5eb4a8462db2c7ffe66c767616 2024-12-03T21:18:04,267 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/ns/6a700e5eb4a8462db2c7ffe66c767616, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T21:18:04,268 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/.tmp/table/6ed3be6ffcb74b99adace8ff670b2768 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/table/6ed3be6ffcb74b99adace8ff670b2768 2024-12-03T21:18:04,277 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/table/6ed3be6ffcb74b99adace8ff670b2768, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T21:18:04,279 INFO 
[RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false 2024-12-03T21:18:04,285 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T21:18:04,285 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:18:04,286 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:04,286 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260684126Running coprocessor pre-close hooks at 1733260684126Disabling compacts and flushes for region at 1733260684126Disabling writes for close at 1733260684126Obtaining lock to block concurrent updates at 1733260684127 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733260684127Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733260684127Flushing stores of hbase:meta,,1.1588230740 at 1733260684128 (+1 ms)Flushing 1588230740/info: creating writer at 1733260684128Flushing 1588230740/info: appending metadata at 1733260684150 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733260684150Flushing 1588230740/ns: creating writer at 1733260684166 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733260684180 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733260684181 (+1 ms)Flushing 1588230740/table: creating writer at 1733260684198 (+17 ms)Flushing 1588230740/table: appending metadata at 1733260684215 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733260684215Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@745e0d70: reopening flushed file at 1733260684243 (+28 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c6d9e6b: reopening flushed file at 1733260684256 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18c9683c: reopening flushed file at 1733260684267 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false at 1733260684279 (+12 ms)Writing region close event to WAL at 1733260684280 (+1 ms)Running coprocessor post-close hooks at 1733260684285 (+5 ms)Closed at 1733260684286 (+1 ms) 2024-12-03T21:18:04,286 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:04,327 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,39079,1733260594106; all regions closed. 
2024-12-03T21:18:04,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,329 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741834_1010 (size=3066) 2024-12-03T21:18:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741834_1010 (size=3066) 2024-12-03T21:18:04,335 DEBUG [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs 2024-12-03T21:18:04,335 INFO [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C39079%2C1733260594106.meta:.meta(num 1733260596209) 2024-12-03T21:18:04,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,336 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,336 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,336 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741846_1022 (size=13040) 2024-12-03T21:18:04,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741846_1022 (size=13040) 2024-12-03T21:18:04,347 DEBUG [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/oldWALs 2024-12-03T21:18:04,347 INFO [RS:0;101545f66cbd:39079 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C39079%2C1733260594106:(num 1733260664055) 2024-12-03T21:18:04,347 DEBUG [RS:0;101545f66cbd:39079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:04,347 INFO [RS:0;101545f66cbd:39079 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:04,348 INFO [RS:0;101545f66cbd:39079 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:04,348 INFO [RS:0;101545f66cbd:39079 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:04,348 INFO [RS:0;101545f66cbd:39079 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:04,348 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:18:04,349 INFO [RS:0;101545f66cbd:39079 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39079 2024-12-03T21:18:04,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:04,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,39079,1733260594106 2024-12-03T21:18:04,356 INFO [RS:0;101545f66cbd:39079 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:04,365 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,39079,1733260594106] 2024-12-03T21:18:04,373 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,39079,1733260594106 already deleted, retry=false 2024-12-03T21:18:04,373 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,39079,1733260594106 expired; onlineServers=0 2024-12-03T21:18:04,373 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,41407,1733260593312' ***** 2024-12-03T21:18:04,374 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:18:04,374 INFO [M:0;101545f66cbd:41407 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:04,374 INFO [M:0;101545f66cbd:41407 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:04,374 DEBUG [M:0;101545f66cbd:41407 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:18:04,374 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T21:18:04,374 DEBUG [M:0;101545f66cbd:41407 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:18:04,374 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260595296 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260595296,5,FailOnTimeoutGroup] 2024-12-03T21:18:04,374 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260595298 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260595298,5,FailOnTimeoutGroup] 2024-12-03T21:18:04,374 INFO [M:0;101545f66cbd:41407 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:04,374 INFO [M:0;101545f66cbd:41407 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:04,374 DEBUG [M:0;101545f66cbd:41407 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:18:04,374 INFO [M:0;101545f66cbd:41407 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:18:04,375 INFO [M:0;101545f66cbd:41407 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:04,375 INFO [M:0;101545f66cbd:41407 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:18:04,375 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:18:04,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:04,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:04,382 DEBUG [M:0;101545f66cbd:41407 {}] zookeeper.ZKUtil(347): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:18:04,382 WARN [M:0;101545f66cbd:41407 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:18:04,383 INFO [M:0;101545f66cbd:41407 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/.lastflushedseqids 2024-12-03T21:18:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741853_1029 (size=130) 2024-12-03T21:18:04,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741853_1029 (size=130) 2024-12-03T21:18:04,395 INFO [M:0;101545f66cbd:41407 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:18:04,395 INFO [M:0;101545f66cbd:41407 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:18:04,395 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:04,395 INFO [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:04,395 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:04,395 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:04,395 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:04,395 INFO [M:0;101545f66cbd:41407 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-12-03T21:18:04,412 DEBUG [M:0;101545f66cbd:41407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c91b4c6fcf44aef8a921ab0b88a128e is 82, key is hbase:meta,,1/info:regioninfo/1733260596279/Put/seqid=0 2024-12-03T21:18:04,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741854_1030 (size=5672) 2024-12-03T21:18:04,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741854_1030 (size=5672) 2024-12-03T21:18:04,418 INFO [M:0;101545f66cbd:41407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c91b4c6fcf44aef8a921ab0b88a128e 2024-12-03T21:18:04,442 DEBUG [M:0;101545f66cbd:41407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/75a7ee3a5fe34c96a7c5167097fe7f08 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733260597092/Put/seqid=0 2024-12-03T21:18:04,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741855_1031 (size=6248) 2024-12-03T21:18:04,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741855_1031 (size=6248) 2024-12-03T21:18:04,449 INFO [M:0;101545f66cbd:41407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/75a7ee3a5fe34c96a7c5167097fe7f08 2024-12-03T21:18:04,459 INFO [M:0;101545f66cbd:41407 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 75a7ee3a5fe34c96a7c5167097fe7f08 2024-12-03T21:18:04,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:04,466 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39079-0x1019e5776ec0001, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:04,466 INFO [RS:0;101545f66cbd:39079 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:04,466 INFO [RS:0;101545f66cbd:39079 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,39079,1733260594106; zookeeper connection closed. 2024-12-03T21:18:04,471 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b1bcf6f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b1bcf6f 2024-12-03T21:18:04,473 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:18:04,481 DEBUG [M:0;101545f66cbd:41407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9a93f65f7d8a4eba9a252b0e40c4e45c is 69, key is 101545f66cbd,39079,1733260594106/rs:state/1733260595349/Put/seqid=0 2024-12-03T21:18:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741856_1032 (size=5156) 2024-12-03T21:18:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741856_1032 (size=5156) 2024-12-03T21:18:04,490 INFO [M:0;101545f66cbd:41407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9a93f65f7d8a4eba9a252b0e40c4e45c 2024-12-03T21:18:04,521 DEBUG [M:0;101545f66cbd:41407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e21204c480954f3a98c18f29cc913dc3 is 52, key is load_balancer_on/state:d/1733260596567/Put/seqid=0 2024-12-03T21:18:04,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741857_1033 (size=5056) 2024-12-03T21:18:04,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741857_1033 (size=5056) 2024-12-03T21:18:04,529 INFO [M:0;101545f66cbd:41407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e21204c480954f3a98c18f29cc913dc3 2024-12-03T21:18:04,536 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c91b4c6fcf44aef8a921ab0b88a128e as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3c91b4c6fcf44aef8a921ab0b88a128e 2024-12-03T21:18:04,543 INFO [M:0;101545f66cbd:41407 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3c91b4c6fcf44aef8a921ab0b88a128e, entries=8, sequenceid=59, filesize=5.5 K 2024-12-03T21:18:04,545 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/75a7ee3a5fe34c96a7c5167097fe7f08 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/75a7ee3a5fe34c96a7c5167097fe7f08 2024-12-03T21:18:04,551 INFO [M:0;101545f66cbd:41407 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 75a7ee3a5fe34c96a7c5167097fe7f08 2024-12-03T21:18:04,552 INFO [M:0;101545f66cbd:41407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/75a7ee3a5fe34c96a7c5167097fe7f08, entries=6, sequenceid=59, filesize=6.1 K 2024-12-03T21:18:04,553 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9a93f65f7d8a4eba9a252b0e40c4e45c as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9a93f65f7d8a4eba9a252b0e40c4e45c 2024-12-03T21:18:04,560 INFO [M:0;101545f66cbd:41407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9a93f65f7d8a4eba9a252b0e40c4e45c, entries=1, sequenceid=59, filesize=5.0 K 2024-12-03T21:18:04,561 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e21204c480954f3a98c18f29cc913dc3 as hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e21204c480954f3a98c18f29cc913dc3 2024-12-03T21:18:04,568 INFO [M:0;101545f66cbd:41407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e21204c480954f3a98c18f29cc913dc3, entries=1, sequenceid=59, filesize=4.9 K 2024-12-03T21:18:04,570 INFO [M:0;101545f66cbd:41407 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false 2024-12-03T21:18:04,572 INFO [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:18:04,572 DEBUG [M:0;101545f66cbd:41407 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260684395Disabling compacts and flushes for region at 1733260684395Disabling writes for close at 1733260684395Obtaining lock to block concurrent updates at 1733260684395Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260684395Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1733260684396 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260684397 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260684397Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260684411 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260684411Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260684425 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260684441 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260684441Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260684459 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260684480 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260684480Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260684500 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260684521 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260684521Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a27941e: reopening flushed file at 1733260684535 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a912682: reopening flushed file at 1733260684543 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e8495: reopening flushed file at 1733260684552 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5125bd86: reopening flushed file at 1733260684560 (+8 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false at 1733260684570 (+10 ms)Writing region close event to WAL at 1733260684572 (+2 ms)Closed at 1733260684572 2024-12-03T21:18:04,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,573 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,573 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,573 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,573 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:04,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39801 is added to blk_1073741830_1006 (size=27985) 2024-12-03T21:18:04,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41099 is added to blk_1073741830_1006 (size=27985) 2024-12-03T21:18:04,577 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:18:04,577 INFO [M:0;101545f66cbd:41407 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T21:18:04,577 INFO [M:0;101545f66cbd:41407 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41407 2024-12-03T21:18:04,577 INFO [M:0;101545f66cbd:41407 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:04,688 INFO [M:0;101545f66cbd:41407 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:04,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:04,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41407-0x1019e5776ec0000, quorum=127.0.0.1:52448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:04,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c963ecd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:04,729 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:04,729 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:04,729 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:04,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:04,733 WARN [BP-1709057562-172.17.0.2-1733260589147 heartbeating to localhost/127.0.0.1:43101 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:04,733 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:04,733 WARN [BP-1709057562-172.17.0.2-1733260589147 heartbeating to localhost/127.0.0.1:43101 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1709057562-172.17.0.2-1733260589147 (Datanode Uuid c24d0376-aebc-43f6-88f4-89c8658bddef) service to localhost/127.0.0.1:43101 2024-12-03T21:18:04,733 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:04,735 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data3/current/BP-1709057562-172.17.0.2-1733260589147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:04,735 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data4/current/BP-1709057562-172.17.0.2-1733260589147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:04,736 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:04,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f93babe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:04,739 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:04,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:04,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:04,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:04,741 WARN [BP-1709057562-172.17.0.2-1733260589147 heartbeating to localhost/127.0.0.1:43101 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:04,741 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:04,741 WARN [BP-1709057562-172.17.0.2-1733260589147 heartbeating to localhost/127.0.0.1:43101 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1709057562-172.17.0.2-1733260589147 (Datanode Uuid 390180c2-d368-47b5-b469-72cd7a5213e0) service to localhost/127.0.0.1:43101 2024-12-03T21:18:04,741 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:04,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data1/current/BP-1709057562-172.17.0.2-1733260589147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:04,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/cluster_2fbebfbd-0181-9d97-ac44-56e537377f9a/data/data2/current/BP-1709057562-172.17.0.2-1733260589147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:04,743 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:04,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:04,773 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:04,773 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:04,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:04,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:04,786 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:18:04,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:18:04,829 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43101 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
master/101545f66cbd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:43101 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/101545f66cbd:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow 
= 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/101545f66cbd:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5f2d9ff8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43101 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/101545f66cbd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43101 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43101 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
 - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=172 (was 472), ProcessCount=11 (was 11), AvailableMemoryMB=3047 (was 2662) - AvailableMemoryMB LEAK? -
2024-12-03T21:18:04,834 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=172, ProcessCount=11, AvailableMemoryMB=3047
2024-12-03T21:18:04,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T21:18:04,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.log.dir so I do NOT create it in target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de
2024-12-03T21:18:04,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/344d308d-f071-95a5-c3c7-d38cf3857eeb/hadoop.tmp.dir so I do NOT create it in target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de
2024-12-03T21:18:04,835 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f, deleteOnExit=true
2024-12-03T21:18:04,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-03T21:18:04,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/test.cache.data in system properties and HBase conf
2024-12-03T21:18:04,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.tmp.dir in system properties and HBase conf
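[Editor's note: the records above show the next test case bringing up a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. The following is a minimal, hypothetical Java sketch of the lifecycle that produces such records; it is not the actual TestLogRolling source. Only HBaseTestingUtil, StartMiniClusterOption, and the option values are taken from the log; the class name and setup/teardown wiring are assumptions.]

    // Hypothetical sketch of the minicluster lifecycle seen in these log records.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        // Produces the "Starting up minicluster" / "STARTING DFS" records above.
        util.startMiniCluster(option);
      }

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutdown MiniZK cluster ..." and "Minicluster is down" records.
        util.shutdownMiniCluster();
      }
    }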
2024-12-03T21:18:04,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:18:04,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:18:04,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:18:04,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:18:04,837 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T21:18:04,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:04,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 
2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:04,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:18:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:18:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:18:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:18:04,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:18:04,854 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:05,396 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:05,403 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:05,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:05,405 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:05,405 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:18:05,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:05,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c8f0dfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:05,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@495543c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:05,436 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:05,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2787d4b6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/java.io.tmpdir/jetty-localhost-40899-hadoop-hdfs-3_4_1-tests_jar-_-any-16190977679334385964/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:05,511 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f450072{HTTP/1.1, (http/1.1)}{localhost:40899} 2024-12-03T21:18:05,511 INFO [Time-limited test {}] server.Server(415): Started @98609ms 2024-12-03T21:18:05,522 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:05,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:05,740 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:05,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:05,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:05,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:18:05,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a779{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:05,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4bedb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:05,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a843c6e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/java.io.tmpdir/jetty-localhost-38499-hadoop-hdfs-3_4_1-tests_jar-_-any-7402102000618505067/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:05,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17783943{HTTP/1.1, (http/1.1)}{localhost:38499} 2024-12-03T21:18:05,835 INFO [Time-limited test {}] server.Server(415): Started @98933ms 2024-12-03T21:18:05,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:05,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:05,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:05,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:05,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:05,886 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:05,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5ed954{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:05,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1db3e806{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:05,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@513d1177{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/java.io.tmpdir/jetty-localhost-36343-hadoop-hdfs-3_4_1-tests_jar-_-any-16706708004799132175/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:05,982 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c0a2992{HTTP/1.1, (http/1.1)}{localhost:36343} 2024-12-03T21:18:05,982 INFO [Time-limited test {}] server.Server(415): Started @99080ms 2024-12-03T21:18:05,983 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:06,855 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data1/current/BP-356274704-172.17.0.2-1733260684866/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:06,855 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data2/current/BP-356274704-172.17.0.2-1733260684866/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:06,874 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe14bc2f6732ff42d with lease ID 0x4b69fa8926a1837f: Processing first storage report for DS-4c4f6103-e6b7-4fcb-865d-2eb0852a4010 from datanode DatanodeRegistration(127.0.0.1:44271, datanodeUuid=b5add678-ed02-4ce3-86eb-81bc629c7d93, infoPort=44479, infoSecurePort=0, ipcPort=44111, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866) 2024-12-03T21:18:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe14bc2f6732ff42d with lease ID 0x4b69fa8926a1837f: from storage DS-4c4f6103-e6b7-4fcb-865d-2eb0852a4010 node DatanodeRegistration(127.0.0.1:44271, datanodeUuid=b5add678-ed02-4ce3-86eb-81bc629c7d93, infoPort=44479, infoSecurePort=0, ipcPort=44111, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe14bc2f6732ff42d with lease ID 0x4b69fa8926a1837f: Processing first storage report for DS-ba685351-9b48-438c-8088-3bbd2c07513f from datanode DatanodeRegistration(127.0.0.1:44271, datanodeUuid=b5add678-ed02-4ce3-86eb-81bc629c7d93, infoPort=44479, infoSecurePort=0, ipcPort=44111, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866) 2024-12-03T21:18:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe14bc2f6732ff42d with lease ID 0x4b69fa8926a1837f: from storage DS-ba685351-9b48-438c-8088-3bbd2c07513f node DatanodeRegistration(127.0.0.1:44271, datanodeUuid=b5add678-ed02-4ce3-86eb-81bc629c7d93, infoPort=44479, infoSecurePort=0, ipcPort=44111, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:07,024 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data4/current/BP-356274704-172.17.0.2-1733260684866/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:07,024 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data3/current/BP-356274704-172.17.0.2-1733260684866/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:07,043 WARN [Thread-432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfe69ee7f02a2fc2 with lease ID 0x4b69fa8926a18380: Processing first storage report for DS-ff26624e-f989-4d48-9d1d-26c12560caad from datanode DatanodeRegistration(127.0.0.1:33897, datanodeUuid=c48fce4f-e911-44ad-8127-8fe377e83144, infoPort=39017, infoSecurePort=0, ipcPort=39601, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866) 2024-12-03T21:18:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfe69ee7f02a2fc2 with lease ID 0x4b69fa8926a18380: from storage DS-ff26624e-f989-4d48-9d1d-26c12560caad node DatanodeRegistration(127.0.0.1:33897, datanodeUuid=c48fce4f-e911-44ad-8127-8fe377e83144, infoPort=39017, infoSecurePort=0, ipcPort=39601, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfe69ee7f02a2fc2 with lease ID 0x4b69fa8926a18380: Processing first storage report for DS-f5a71253-9ad8-42c4-8b45-71ffec19e191 from datanode DatanodeRegistration(127.0.0.1:33897, datanodeUuid=c48fce4f-e911-44ad-8127-8fe377e83144, infoPort=39017, infoSecurePort=0, ipcPort=39601, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866) 2024-12-03T21:18:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfe69ee7f02a2fc2 with lease ID 0x4b69fa8926a18380: from storage DS-f5a71253-9ad8-42c4-8b45-71ffec19e191 node DatanodeRegistration(127.0.0.1:33897, datanodeUuid=c48fce4f-e911-44ad-8127-8fe377e83144, infoPort=39017, infoSecurePort=0, ipcPort=39601, storageInfo=lv=-57;cid=testClusterID;nsid=1850560613;c=1733260684866), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:07,120 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de 2024-12-03T21:18:07,123 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/zookeeper_0, clientPort=64771, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:18:07,124 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64771 2024-12-03T21:18:07,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,126 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:07,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:07,139 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e with version=8 2024-12-03T21:18:07,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:18:07,142 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:07,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:18:07,143 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:07,144 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42287 2024-12-03T21:18:07,146 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42287 connecting to ZooKeeper ensemble=127.0.0.1:64771 2024-12-03T21:18:07,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422870x0, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:07,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42287-0x1019e58e8fb0000 connected 2024-12-03T21:18:07,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,317 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:07,317 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e, hbase.cluster.distributed=false 2024-12-03T21:18:07,319 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:07,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42287 2024-12-03T21:18:07,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42287 2024-12-03T21:18:07,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42287 2024-12-03T21:18:07,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42287 2024-12-03T21:18:07,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42287 2024-12-03T21:18:07,336 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:18:07,336 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:07,337 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36853 2024-12-03T21:18:07,338 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36853 connecting to ZooKeeper ensemble=127.0.0.1:64771 2024-12-03T21:18:07,339 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368530x0, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:07,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368530x0, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:07,355 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36853-0x1019e58e8fb0001 connected 2024-12-03T21:18:07,355 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:18:07,359 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:18:07,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:18:07,362 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:07,363 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36853 2024-12-03T21:18:07,363 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36853 2024-12-03T21:18:07,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36853 2024-12-03T21:18:07,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36853 2024-12-03T21:18:07,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36853 2024-12-03T21:18:07,388 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:42287 2024-12-03T21:18:07,389 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:07,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:07,399 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:07,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,415 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:18:07,416 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,42287,1733260687142 from backup master directory 2024-12-03T21:18:07,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:07,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,423 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
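The entries above trace the mini-cluster bootstrap for this test: embedded HDFS datanodes, a MiniZooKeeperCluster on clientPort=64771, and the master and region server RPC endpoints. For reference, a bootstrap like this is normally driven from test code roughly as follows; this is a sketch against the HBase 3 test utilities, and the option values are assumed rather than read from this run.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartTestingClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Single master, single region server, two HDFS datanodes and an
        // embedded ZooKeeper, matching the topology being started above.
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartTestingClusterOption option = StartTestingClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster via util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
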
2024-12-03T21:18:07,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:07,423 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,434 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/hbase.id] with ID: 379b0954-3968-4d92-b00c-26b9becb398c 2024-12-03T21:18:07,435 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/.tmp/hbase.id 2024-12-03T21:18:07,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:07,443 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/.tmp/hbase.id]:[hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/hbase.id] 2024-12-03T21:18:07,466 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:07,467 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:18:07,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
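Note the two-step publication of the cluster ID above: it is first written under .tmp/hbase.id and then moved to its final location, so readers never observe a half-written file. The same write-then-rename pattern with the plain Hadoop FileSystem API looks roughly like this; the paths and the raw-string payload are illustrative, and FSUtils itself stores the ID in its own format.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // illustrative paths
        Path dst = new Path("/hbase/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("379b0954-3968-4d92-b00c-26b9becb398c"
              .getBytes(StandardCharsets.UTF_8));
        }
        // Publish in one step: a reader sees either no file or the complete one.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
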
2024-12-03T21:18:07,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:07,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:07,490 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:18:07,491 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:18:07,492 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:07,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:07,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:07,507 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store 2024-12-03T21:18:07,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:07,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:07,515 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:07,515 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:07,516 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:07,516 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:07,516 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:07,516 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:07,516 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
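The master:store descriptor printed above is an ordinary table descriptor with four column families (info, proc, rs, state). For comparison, a family with the same shape as 'info' can be described through the public builder API; this sketch sets only the attributes that are visible in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor infoFamilyLikeDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
      }
    }
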
2024-12-03T21:18:07,516 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260687515Disabling compacts and flushes for region at 1733260687515Disabling writes for close at 1733260687516 (+1 ms)Writing region close event to WAL at 1733260687516Closed at 1733260687516 2024-12-03T21:18:07,517 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/.initializing 2024-12-03T21:18:07,517 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/WALs/101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,520 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C42287%2C1733260687142, suffix=, logDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/WALs/101545f66cbd,42287,1733260687142, archiveDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/oldWALs, maxLogs=10 2024-12-03T21:18:07,521 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C42287%2C1733260687142.1733260687521 2024-12-03T21:18:07,527 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/WALs/101545f66cbd,42287,1733260687142/101545f66cbd%2C42287%2C1733260687142.1733260687521 2024-12-03T21:18:07,531 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39017:39017),(127.0.0.1/127.0.0.1:44479:44479)] 2024-12-03T21:18:07,532 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:07,532 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:07,532 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,532 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,534 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,535 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:18:07,535 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:07,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:07,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:18:07,538 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:07,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:07,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:18:07,541 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:07,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:07,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:18:07,543 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:07,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:07,544 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,545 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,546 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,548 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,548 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,548 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:18:07,550 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:07,553 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:07,554 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815301, jitterRate=0.0367099791765213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:18:07,555 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260687532Initializing all the Stores at 1733260687533 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260687533Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260687534 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260687534Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260687534Cleaning up temporary data from old regions at 1733260687548 (+14 ms)Region opened successfully at 1733260687555 (+7 ms) 2024-12-03T21:18:07,555 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:18:07,560 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7806d031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:07,561 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:18:07,561 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:18:07,561 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:18:07,561 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:18:07,562 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:18:07,562 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:18:07,562 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:18:07,565 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:18:07,566 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:18:07,573 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:18:07,573 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:18:07,574 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:18:07,581 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:18:07,582 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:18:07,583 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:18:07,590 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:18:07,591 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:18:07,598 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:18:07,600 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:18:07,606 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:18:07,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:07,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:07,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,615 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,42287,1733260687142, sessionid=0x1019e58e8fb0000, setting cluster-up flag (Was=false) 2024-12-03T21:18:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,656 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:18:07,658 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:07,698 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:18:07,699 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,42287,1733260687142 2024-12-03T21:18:07,701 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:18:07,702 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:07,703 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:18:07,703 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:18:07,703 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,42287,1733260687142 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:07,705 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260717706 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:18:07,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:18:07,707 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:18:07,707 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,707 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:18:07,707 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:18:07,708 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:18:07,708 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:07,708 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:18:07,708 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:18:07,708 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:18:07,708 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260687708,5,FailOnTimeoutGroup] 2024-12-03T21:18:07,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260687708,5,FailOnTimeoutGroup] 2024-12-03T21:18:07,709 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,709 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:18:07,709 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,709 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,709 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:07,709 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:18:07,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:07,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:07,772 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(746): ClusterId : 379b0954-3968-4d92-b00c-26b9becb398c 2024-12-03T21:18:07,773 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:18:07,782 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:18:07,782 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:18:07,791 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:18:07,791 DEBUG [RS:0;101545f66cbd:36853 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c4133e5, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:07,801 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:36853 2024-12-03T21:18:07,801 INFO [RS:0;101545f66cbd:36853 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:18:07,801 INFO [RS:0;101545f66cbd:36853 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:18:07,801 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T21:18:07,802 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,42287,1733260687142 with port=36853, startcode=1733260687336 2024-12-03T21:18:07,802 DEBUG [RS:0;101545f66cbd:36853 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:18:07,805 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43405, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:18:07,805 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42287 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,805 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42287 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,807 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e 2024-12-03T21:18:07,807 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44367 2024-12-03T21:18:07,807 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:18:07,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:07,815 DEBUG [RS:0;101545f66cbd:36853 {}] zookeeper.ZKUtil(111): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,815 WARN [RS:0;101545f66cbd:36853 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T21:18:07,816 INFO [RS:0;101545f66cbd:36853 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:07,816 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/WALs/101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,816 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,36853,1733260687336] 2024-12-03T21:18:07,821 INFO [RS:0;101545f66cbd:36853 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:18:07,827 INFO [RS:0;101545f66cbd:36853 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:18:07,827 INFO [RS:0;101545f66cbd:36853 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:18:07,827 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,828 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:18:07,829 INFO [RS:0;101545f66cbd:36853 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:18:07,829 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,829 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,829 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,829 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,830 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:07,831 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:07,831 DEBUG [RS:0;101545f66cbd:36853 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:07,831 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,831 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,831 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,831 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,832 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,832 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36853,1733260687336-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:07,852 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:18:07,853 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36853,1733260687336-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,853 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:07,853 INFO [RS:0;101545f66cbd:36853 {}] regionserver.Replication(171): 101545f66cbd,36853,1733260687336 started 2024-12-03T21:18:07,870 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:07,870 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,36853,1733260687336, RpcServer on 101545f66cbd/172.17.0.2:36853, sessionid=0x1019e58e8fb0001 2024-12-03T21:18:07,870 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:18:07,870 DEBUG [RS:0;101545f66cbd:36853 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,870 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,36853,1733260687336' 2024-12-03T21:18:07,870 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:18:07,871 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,36853,1733260687336 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,36853,1733260687336' 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:18:07,872 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:18:07,873 DEBUG [RS:0;101545f66cbd:36853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:18:07,873 INFO [RS:0;101545f66cbd:36853 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:18:07,873 INFO [RS:0;101545f66cbd:36853 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-03T21:18:07,977 INFO [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C36853%2C1733260687336, suffix=, logDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/WALs/101545f66cbd,36853,1733260687336, archiveDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/oldWALs, maxLogs=32 2024-12-03T21:18:07,981 INFO [RS:0;101545f66cbd:36853 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C36853%2C1733260687336.1733260687980 2024-12-03T21:18:07,989 INFO [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/WALs/101545f66cbd,36853,1733260687336/101545f66cbd%2C36853%2C1733260687336.1733260687980 2024-12-03T21:18:07,990 DEBUG [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39017:39017),(127.0.0.1/127.0.0.1:44479:44479)] 2024-12-03T21:18:08,118 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:18:08,118 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e 2024-12-03T21:18:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741833_1009 (size=32) 2024-12-03T21:18:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741833_1009 (size=32) 2024-12-03T21:18:08,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:08,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:08,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:08,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:08,134 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:08,134 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:08,136 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:08,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:08,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:08,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:08,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740 2024-12-03T21:18:08,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740 2024-12-03T21:18:08,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:08,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 
2024-12-03T21:18:08,142 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:18:08,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:08,145 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:08,146 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870872, jitterRate=0.10737185180187225}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:08,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260688128Initializing all the Stores at 1733260688129 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688129Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688129Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260688129Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688129Cleaning up temporary data from old regions at 1733260688142 (+13 ms)Region opened successfully at 1733260688147 (+5 ms) 2024-12-03T21:18:08,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:08,147 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:18:08,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:08,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:08,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:08,147 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:08,147 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260688147Disabling compacts and flushes for region at 1733260688147Disabling writes for close at 1733260688147Writing region close event to WAL at 1733260688147Closed at 1733260688147 2024-12-03T21:18:08,149 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:08,149 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:18:08,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:18:08,150 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:08,152 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:18:08,302 DEBUG [101545f66cbd:42287 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:18:08,303 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,36853,1733260687336 2024-12-03T21:18:08,305 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,36853,1733260687336, state=OPENING 2024-12-03T21:18:08,332 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:18:08,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:08,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:08,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:08,340 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:08,340 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:08,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,36853,1733260687336}] 2024-12-03T21:18:08,496 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:18:08,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45125, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:18:08,509 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:18:08,510 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:08,513 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C36853%2C1733260687336.meta, suffix=.meta, logDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/WALs/101545f66cbd,36853,1733260687336, archiveDir=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/oldWALs, maxLogs=32 2024-12-03T21:18:08,515 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C36853%2C1733260687336.meta.1733260688515.meta 2024-12-03T21:18:08,523 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/WALs/101545f66cbd,36853,1733260687336/101545f66cbd%2C36853%2C1733260687336.meta.1733260688515.meta 2024-12-03T21:18:08,524 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44479:44479),(127.0.0.1/127.0.0.1:39017:39017)] 2024-12-03T21:18:08,524 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:18:08,525 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:18:08,525 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:18:08,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:08,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:08,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:08,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:08,531 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:08,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:08,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:08,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:08,535 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:08,535 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:08,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T21:18:08,536 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:08,537 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740 2024-12-03T21:18:08,539 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740 2024-12-03T21:18:08,541 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:08,541 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:18:08,541 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:18:08,543 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:08,544 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774534, jitterRate=-0.015129461884498596}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:08,544 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:18:08,545 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260688525Writing region info on filesystem at 1733260688525Initializing all the Stores at 1733260688527 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688527Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688527Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260688527Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260688527Cleaning up temporary data from old regions at 1733260688541 (+14 ms)Running coprocessor post-open hooks at 1733260688544 (+3 ms)Region opened successfully at 1733260688545 (+1 ms) 2024-12-03T21:18:08,546 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260688495 2024-12-03T21:18:08,549 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:18:08,550 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:18:08,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,36853,1733260687336 2024-12-03T21:18:08,552 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,36853,1733260687336, state=OPEN 2024-12-03T21:18:08,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:08,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:08,583 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,36853,1733260687336 2024-12-03T21:18:08,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:08,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:08,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:18:08,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,36853,1733260687336 in 243 msec 2024-12-03T21:18:08,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:18:08,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 441 msec 2024-12-03T21:18:08,599 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:08,599 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:18:08,601 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:08,601 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,36853,1733260687336, seqNum=-1] 2024-12-03T21:18:08,601 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:08,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:08,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 907 msec 2024-12-03T21:18:08,611 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260688611, completionTime=-1 2024-12-03T21:18:08,611 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:18:08,611 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260748614 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260808614 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:08,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:08,615 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:42287, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:08,615 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:08,615 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:08,617 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:18:08,620 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.197sec 2024-12-03T21:18:08,620 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:18:08,620 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:18:08,621 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:18:08,621 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:18:08,621 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:18:08,621 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:08,621 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:18:08,624 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:18:08,624 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:18:08,624 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,42287,1733260687142-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:08,673 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26d21920, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:08,673 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,42287,-1 for getting cluster id 2024-12-03T21:18:08,673 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:18:08,676 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '379b0954-3968-4d92-b00c-26b9becb398c' 2024-12-03T21:18:08,677 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:18:08,678 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "379b0954-3968-4d92-b00c-26b9becb398c" 2024-12-03T21:18:08,679 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@647d7b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:08,679 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,42287,-1] 2024-12-03T21:18:08,679 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:18:08,681 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,683 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52306, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:18:08,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b23b42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:08,685 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:08,686 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,36853,1733260687336, seqNum=-1] 2024-12-03T21:18:08,686 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:08,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49086, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:08,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,42287,1733260687142 2024-12-03T21:18:08,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:08,694 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:18:08,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:18:08,694 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:18:08,694 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:08,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,695 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T21:18:08,695 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:18:08,695 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469549761, stopped=false 2024-12-03T21:18:08,695 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,42287,1733260687142 2024-12-03T21:18:08,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:08,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:08,712 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:08,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:08,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:08,712 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
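The call stacks above show how this test tears down: TestLogRolling.testLogRollOnDatanodeDeath runs AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster and closes the async connection. A rough sketch of that lifecycle follows, assuming hbase-testing-util and JUnit 4 on the classpath; the StartMiniClusterOption builder call and the table name/family are illustrative, with the option values copied from the StartMiniClusterOption printed further down in this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // 1 master, 1 region server, 2 data nodes, as in the options logged below.
    util.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1).numRegionServers(1).numDataNodes(2).build());
  }

  @Test
  public void testSomething() throws Exception {
    // Illustrative table; the real log-rolling tests create their own.
    Table table = util.createTable(TableName.valueOf("t"), Bytes.toBytes("f"));
    table.close();
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" ... "Minicluster is down" lines.
    util.shutdownMiniCluster();
  }
}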
2024-12-03T21:18:08,713 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:08,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,713 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:08,713 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:08,713 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,36853,1733260687336' ***** 2024-12-03T21:18:08,713 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:18:08,713 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:18:08,713 INFO [RS:0;101545f66cbd:36853 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:18:08,713 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,36853,1733260687336 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:36853. 2024-12-03T21:18:08,714 DEBUG [RS:0;101545f66cbd:36853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:08,714 DEBUG [RS:0;101545f66cbd:36853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:18:08,714 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T21:18:08,714 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T21:18:08,714 DEBUG [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T21:18:08,714 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:08,715 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:18:08,715 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:08,715 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:08,715 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:08,715 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T21:18:08,731 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/.tmp/ns/f27c52303d06410f9e1ff460e6b3a290 is 43, key is default/ns:d/1733260688604/Put/seqid=0 2024-12-03T21:18:08,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741835_1011 (size=5153) 2024-12-03T21:18:08,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741835_1011 (size=5153) 2024-12-03T21:18:08,747 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/.tmp/ns/f27c52303d06410f9e1ff460e6b3a290 2024-12-03T21:18:08,757 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/.tmp/ns/f27c52303d06410f9e1ff460e6b3a290 as hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/ns/f27c52303d06410f9e1ff460e6b3a290 2024-12-03T21:18:08,764 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/ns/f27c52303d06410f9e1ff460e6b3a290, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T21:18:08,766 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 51ms, sequenceid=6, compaction requested=false 2024-12-03T21:18:08,766 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T21:18:08,772 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T21:18:08,773 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:18:08,773 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:08,774 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260688714Running coprocessor pre-close hooks at 1733260688714Disabling compacts and flushes for region at 1733260688714Disabling writes for close at 1733260688715 (+1 ms)Obtaining lock to block concurrent updates at 1733260688715Preparing flush snapshotting stores in 1588230740 at 1733260688715Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733260688715Flushing stores of hbase:meta,,1.1588230740 at 1733260688716 (+1 ms)Flushing 1588230740/ns: creating writer at 1733260688716Flushing 1588230740/ns: appending metadata at 1733260688731 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733260688731Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18155bf0: reopening flushed file at 1733260688755 (+24 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 51ms, sequenceid=6, compaction requested=false at 1733260688766 (+11 ms)Writing region close event to WAL at 1733260688767 (+1 ms)Running coprocessor post-close hooks at 1733260688773 (+6 ms)Closed at 1733260688773 2024-12-03T21:18:08,774 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:08,832 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T21:18:08,832 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T21:18:08,915 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,36853,1733260687336; all regions closed. 
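The entries above record the close-time flush of hbase:meta: the memstore is written to a temporary HFile under .tmp/ns, committed into the ns family directory, and the region close journal is printed. A flush can also be requested from a client; below is a small sketch using the async client that appears in this log (AsyncConnectionImpl is the implementation behind ConnectionFactory.createAsyncConnection), assuming a reachable cluster configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // Ask the servers to flush hbase:meta; server side this writes the memstore
      // out to an HFile, much like the close-time flush shown above.
      conn.getAdmin().flush(TableName.META_TABLE_NAME).get();
    }
  }
}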
2024-12-03T21:18:08,916 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,917 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,917 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,918 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,918 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741834_1010 (size=1152) 2024-12-03T21:18:08,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741834_1010 (size=1152) 2024-12-03T21:18:08,929 DEBUG [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/oldWALs 2024-12-03T21:18:08,929 INFO [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C36853%2C1733260687336.meta:.meta(num 1733260688515) 2024-12-03T21:18:08,930 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,930 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,931 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,931 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741832_1008 (size=93) 2024-12-03T21:18:08,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741832_1008 (size=93) 2024-12-03T21:18:08,937 DEBUG [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/oldWALs 2024-12-03T21:18:08,937 INFO [RS:0;101545f66cbd:36853 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C36853%2C1733260687336:(num 1733260687980) 2024-12-03T21:18:08,937 DEBUG [RS:0;101545f66cbd:36853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:08,937 INFO [RS:0;101545f66cbd:36853 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:08,937 INFO [RS:0;101545f66cbd:36853 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:08,937 INFO [RS:0;101545f66cbd:36853 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:08,938 INFO [RS:0;101545f66cbd:36853 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:08,938 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
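Above, each FSHLog is closed and its last WAL file is archived to the oldWALs directory as the region server stops. The surrounding test (TestLogRolling) is about WAL rolling, and a roll can also be requested explicitly through the Admin API. A sketch, assuming a running cluster and that Admin.getRegionServers()/rollWALWriter() are available in this client version:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Roll the WAL on every live region server; the old writer is closed and
      // the finished file is later moved to oldWALs, as in the lines above.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}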
2024-12-03T21:18:08,938 INFO [RS:0;101545f66cbd:36853 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36853 2024-12-03T21:18:08,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:08,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,36853,1733260687336 2024-12-03T21:18:08,948 INFO [RS:0;101545f66cbd:36853 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:08,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,36853,1733260687336] 2024-12-03T21:18:08,964 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,36853,1733260687336 already deleted, retry=false 2024-12-03T21:18:08,965 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,36853,1733260687336 expired; onlineServers=0 2024-12-03T21:18:08,965 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,42287,1733260687142' ***** 2024-12-03T21:18:08,965 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:18:08,965 INFO [M:0;101545f66cbd:42287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:08,965 INFO [M:0;101545f66cbd:42287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:08,965 DEBUG [M:0;101545f66cbd:42287 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:18:08,965 DEBUG [M:0;101545f66cbd:42287 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:18:08,965 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T21:18:08,965 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260687708 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260687708,5,FailOnTimeoutGroup] 2024-12-03T21:18:08,965 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260687708 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260687708,5,FailOnTimeoutGroup] 2024-12-03T21:18:08,965 INFO [M:0;101545f66cbd:42287 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:08,966 INFO [M:0;101545f66cbd:42287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:08,966 DEBUG [M:0;101545f66cbd:42287 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:18:08,966 INFO [M:0;101545f66cbd:42287 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:18:08,966 INFO [M:0;101545f66cbd:42287 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:08,966 INFO [M:0;101545f66cbd:42287 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:18:08,966 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:18:08,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:08,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:08,973 DEBUG [M:0;101545f66cbd:42287 {}] zookeeper.ZKUtil(347): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:18:08,973 WARN [M:0;101545f66cbd:42287 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:18:08,974 INFO [M:0;101545f66cbd:42287 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/.lastflushedseqids 2024-12-03T21:18:08,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741836_1012 (size=99) 2024-12-03T21:18:08,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741836_1012 (size=99) 2024-12-03T21:18:08,985 INFO [M:0;101545f66cbd:42287 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:18:08,985 INFO [M:0;101545f66cbd:42287 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:18:08,985 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:08,985 INFO [M:0;101545f66cbd:42287 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:08,985 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:08,985 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:08,985 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:08,985 INFO [M:0;101545f66cbd:42287 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T21:18:09,003 DEBUG [M:0;101545f66cbd:42287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/96f55e74d549451eb810f7bf65de1630 is 82, key is hbase:meta,,1/info:regioninfo/1733260688551/Put/seqid=0 2024-12-03T21:18:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741837_1013 (size=5672) 2024-12-03T21:18:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741837_1013 (size=5672) 2024-12-03T21:18:09,014 INFO [M:0;101545f66cbd:42287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/96f55e74d549451eb810f7bf65de1630 2024-12-03T21:18:09,036 DEBUG [M:0;101545f66cbd:42287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e8156e6be844b74bf570dbf32cbd6d1 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733260688610/Put/seqid=0 2024-12-03T21:18:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741838_1014 (size=5275) 2024-12-03T21:18:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741838_1014 (size=5275) 2024-12-03T21:18:09,042 INFO [M:0;101545f66cbd:42287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e8156e6be844b74bf570dbf32cbd6d1 2024-12-03T21:18:09,056 INFO [RS:0;101545f66cbd:36853 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:09,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:09,057 INFO [RS:0;101545f66cbd:36853 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,36853,1733260687336; zookeeper connection closed. 
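The ZKWatcher entries in this section are ZooKeeper NodeDeleted/NodeChildrenChanged events on znodes such as /hbase/running, /hbase/rs/... and /hbase/master as the cluster shuts down. ZKWatcher itself is HBase-internal, but the same events can be observed with the plain ZooKeeper client; a sketch, assuming the quorum address shown in the log (127.0.0.1:64771) and a cluster that is still running when the watch is set:

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // The same event type logged above: NodeDeleted on /hbase/running.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown();
      }
    };
    // Quorum address taken from the log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64771", 30000, watcher);
    zk.exists("/hbase/running", watcher);   // sets a one-shot watch on the znode
    deleted.await();                        // fires when the master deletes it at shutdown
    zk.close();
  }
}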
2024-12-03T21:18:09,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36853-0x1019e58e8fb0001, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:09,057 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@471dd4c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@471dd4c4 2024-12-03T21:18:09,057 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:18:09,064 DEBUG [M:0;101545f66cbd:42287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c81eec916c5b45b1859823d986b0830a is 69, key is 101545f66cbd,36853,1733260687336/rs:state/1733260687805/Put/seqid=0 2024-12-03T21:18:09,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741839_1015 (size=5156) 2024-12-03T21:18:09,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741839_1015 (size=5156) 2024-12-03T21:18:09,070 INFO [M:0;101545f66cbd:42287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c81eec916c5b45b1859823d986b0830a 2024-12-03T21:18:09,092 DEBUG [M:0;101545f66cbd:42287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4313547d793541eba4fc6185795818a2 is 52, key is load_balancer_on/state:d/1733260688692/Put/seqid=0 2024-12-03T21:18:09,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741840_1016 (size=5056) 2024-12-03T21:18:09,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741840_1016 (size=5056) 2024-12-03T21:18:09,097 INFO [M:0;101545f66cbd:42287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4313547d793541eba4fc6185795818a2 2024-12-03T21:18:09,104 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/96f55e74d549451eb810f7bf65de1630 as hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/96f55e74d549451eb810f7bf65de1630 2024-12-03T21:18:09,109 INFO [M:0;101545f66cbd:42287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/96f55e74d549451eb810f7bf65de1630, entries=8, sequenceid=29, 
filesize=5.5 K 2024-12-03T21:18:09,111 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e8156e6be844b74bf570dbf32cbd6d1 as hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e8156e6be844b74bf570dbf32cbd6d1 2024-12-03T21:18:09,117 INFO [M:0;101545f66cbd:42287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e8156e6be844b74bf570dbf32cbd6d1, entries=3, sequenceid=29, filesize=5.2 K 2024-12-03T21:18:09,118 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c81eec916c5b45b1859823d986b0830a as hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c81eec916c5b45b1859823d986b0830a 2024-12-03T21:18:09,124 INFO [M:0;101545f66cbd:42287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c81eec916c5b45b1859823d986b0830a, entries=1, sequenceid=29, filesize=5.0 K 2024-12-03T21:18:09,126 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4313547d793541eba4fc6185795818a2 as hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4313547d793541eba4fc6185795818a2 2024-12-03T21:18:09,131 INFO [M:0;101545f66cbd:42287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44367/user/jenkins/test-data/d3edaaac-5166-cb3c-cea1-921d7a317a5e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4313547d793541eba4fc6185795818a2, entries=1, sequenceid=29, filesize=4.9 K 2024-12-03T21:18:09,132 INFO [M:0;101545f66cbd:42287 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false 2024-12-03T21:18:09,134 INFO [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:18:09,134 DEBUG [M:0;101545f66cbd:42287 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260688985Disabling compacts and flushes for region at 1733260688985Disabling writes for close at 1733260688985Obtaining lock to block concurrent updates at 1733260688985Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260688985Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733260688986 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260688987 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260688987Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260689002 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260689003 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260689020 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260689036 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260689036Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260689048 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260689063 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260689063Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260689075 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260689091 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260689091Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@938bf21: reopening flushed file at 1733260689103 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25dc1c24: reopening flushed file at 1733260689110 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16e65a98: reopening flushed file at 1733260689117 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f05af90: reopening flushed file at 1733260689125 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false at 1733260689132 (+7 ms)Writing region close event to WAL at 1733260689134 (+2 ms)Closed at 1733260689134 2024-12-03T21:18:09,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:09,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:09,136 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:09,136 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:09,136 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:09,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44271 is added to blk_1073741830_1006 (size=10311) 2024-12-03T21:18:09,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33897 is added to blk_1073741830_1006 (size=10311) 2024-12-03T21:18:09,139 INFO [M:0;101545f66cbd:42287 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T21:18:09,139 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T21:18:09,139 INFO [M:0;101545f66cbd:42287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42287 2024-12-03T21:18:09,139 INFO [M:0;101545f66cbd:42287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:09,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,248 INFO [M:0;101545f66cbd:42287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:09,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:09,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42287-0x1019e58e8fb0000, quorum=127.0.0.1:64771, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:09,251 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@513d1177{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:09,252 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c0a2992{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:09,252 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:09,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1db3e806{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:09,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5ed954{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:09,254 WARN [BP-356274704-172.17.0.2-1733260684866 heartbeating to localhost/127.0.0.1:44367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:09,254 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:09,255 WARN [BP-356274704-172.17.0.2-1733260684866 heartbeating to localhost/127.0.0.1:44367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-356274704-172.17.0.2-1733260684866 (Datanode Uuid c48fce4f-e911-44ad-8127-8fe377e83144) service to localhost/127.0.0.1:44367 2024-12-03T21:18:09,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:09,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data3/current/BP-356274704-172.17.0.2-1733260684866 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:09,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data4/current/BP-356274704-172.17.0.2-1733260684866 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:09,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:09,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a843c6e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:09,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17783943{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:09,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:09,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4bedb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:09,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a779{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:09,261 WARN [BP-356274704-172.17.0.2-1733260684866 heartbeating to localhost/127.0.0.1:44367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:09,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
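The Jetty and datanode shutdown lines here (and the matching startup lines that follow) are the HDFS half of the minicluster: HBaseTestingUtil runs an embedded MiniDFSCluster from the hadoop-hdfs tests jar referenced in the web app paths above. For completeness, a sketch of driving that cluster directly, assuming the hadoop-hdfs test artifact on the classpath; the path written here is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two data nodes, matching numDataNodes=2 in the minicluster options above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/user/jenkins/example"));   // illustrative path
    } finally {
      // Triggers the same datanode/namenode shutdown logging seen above.
      cluster.shutdown();
    }
  }
}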
2024-12-03T21:18:09,262 WARN [BP-356274704-172.17.0.2-1733260684866 heartbeating to localhost/127.0.0.1:44367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-356274704-172.17.0.2-1733260684866 (Datanode Uuid b5add678-ed02-4ce3-86eb-81bc629c7d93) service to localhost/127.0.0.1:44367 2024-12-03T21:18:09,262 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:09,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data1/current/BP-356274704-172.17.0.2-1733260684866 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:09,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/cluster_3a4e0250-62c5-90f6-5f45-566e5c61742f/data/data2/current/BP-356274704-172.17.0.2-1733260684866 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:09,263 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:09,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2787d4b6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:09,268 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f450072{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:09,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:09,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@495543c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:09,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c8f0dfe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:09,273 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:18:09,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:18:09,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T21:18:09,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.log.dir so I do NOT create it in target/test-data/e9d42888-668d-aaec-8e33-515f6374998c 2024-12-03T21:18:09,289 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/116b5f55-1925-8731-d9d6-a00f34fa63de/hadoop.tmp.dir so I do NOT create it in target/test-data/e9d42888-668d-aaec-8e33-515f6374998c 2024-12-03T21:18:09,289 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c, deleteOnExit=true 2024-12-03T21:18:09,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/test.cache.data in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:18:09,290 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:09,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:09,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:18:09,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:18:09,304 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:09,561 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-03T21:18:09,563 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,579 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,581 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,581 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:09,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:09,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:09,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:09,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:09,608 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:18:09,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:09,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@100caf4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:09,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c3e366f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:09,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@acc902f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-34345-hadoop-hdfs-3_4_1-tests_jar-_-any-15137098119272539883/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:09,702 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71ccb409{HTTP/1.1, (http/1.1)}{localhost:34345} 2024-12-03T21:18:09,702 INFO [Time-limited test {}] server.Server(415): Started @102800ms 2024-12-03T21:18:09,714 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:09,832 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:10,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:10,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:10,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:10,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:10,083 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:18:10,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ad2b72e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:10,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ed9c958{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:10,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@527a79d3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-38775-hadoop-hdfs-3_4_1-tests_jar-_-any-5042412494960404866/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:10,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78ac40e9{HTTP/1.1, (http/1.1)}{localhost:38775} 2024-12-03T21:18:10,174 INFO [Time-limited test {}] server.Server(415): Started @103272ms 2024-12-03T21:18:10,176 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:10,208 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:10,211 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:10,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:10,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:10,212 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:10,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f186434{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:10,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d227195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:10,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cc9fc90{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-35099-hadoop-hdfs-3_4_1-tests_jar-_-any-1790619898853073664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:10,307 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7909691f{HTTP/1.1, (http/1.1)}{localhost:35099} 2024-12-03T21:18:10,307 INFO [Time-limited test {}] server.Server(415): Started @103405ms 2024-12-03T21:18:10,308 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:11,093 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data1/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:11,093 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data2/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:11,113 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:11,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe0cfef48563ead7 with lease ID 0x4bbc22489a7baa50: Processing first storage report for DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c from datanode DatanodeRegistration(127.0.0.1:39781, datanodeUuid=e8625039-e36f-45c3-8b0a-40844ce3dcaf, infoPort=38597, infoSecurePort=0, ipcPort=33003, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:11,115 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe0cfef48563ead7 with lease ID 0x4bbc22489a7baa50: from storage DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c node DatanodeRegistration(127.0.0.1:39781, datanodeUuid=e8625039-e36f-45c3-8b0a-40844ce3dcaf, infoPort=38597, infoSecurePort=0, ipcPort=33003, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:11,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe0cfef48563ead7 with lease ID 0x4bbc22489a7baa50: Processing first storage report for DS-66e0c595-cbcb-4fbf-81f8-06e04f35879e from datanode DatanodeRegistration(127.0.0.1:39781, datanodeUuid=e8625039-e36f-45c3-8b0a-40844ce3dcaf, infoPort=38597, infoSecurePort=0, ipcPort=33003, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:11,116 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe0cfef48563ead7 with lease ID 0x4bbc22489a7baa50: from storage DS-66e0c595-cbcb-4fbf-81f8-06e04f35879e node DatanodeRegistration(127.0.0.1:39781, datanodeUuid=e8625039-e36f-45c3-8b0a-40844ce3dcaf, infoPort=38597, infoSecurePort=0, ipcPort=33003, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:11,285 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data3/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:11,286 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data4/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:11,303 WARN [Thread-652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b4f58aeb63c14b4 with lease ID 0x4bbc22489a7baa51: Processing first storage report for DS-c933cce1-4c78-4076-82ad-cc6d66d06618 from datanode DatanodeRegistration(127.0.0.1:39985, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=44649, infoSecurePort=0, ipcPort=46021, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b4f58aeb63c14b4 with lease ID 0x4bbc22489a7baa51: from storage DS-c933cce1-4c78-4076-82ad-cc6d66d06618 node DatanodeRegistration(127.0.0.1:39985, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=44649, infoSecurePort=0, ipcPort=46021, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b4f58aeb63c14b4 with lease ID 0x4bbc22489a7baa51: Processing first storage report for DS-15d26117-d0e6-4d32-99c4-a1282fe98f6e from datanode DatanodeRegistration(127.0.0.1:39985, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=44649, infoSecurePort=0, ipcPort=46021, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b4f58aeb63c14b4 with lease ID 0x4bbc22489a7baa51: from storage DS-15d26117-d0e6-4d32-99c4-a1282fe98f6e node DatanodeRegistration(127.0.0.1:39985, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=44649, infoSecurePort=0, ipcPort=46021, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:11,342 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c 2024-12-03T21:18:11,346 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/zookeeper_0, clientPort=59685, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:18:11,347 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59685 2024-12-03T21:18:11,347 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,350 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:11,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:11,364 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31 with version=8 2024-12-03T21:18:11,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:18:11,366 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:11,366 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,366 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,366 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:11,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:11,367 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:18:11,367 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:11,368 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36475 2024-12-03T21:18:11,370 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36475 connecting to ZooKeeper ensemble=127.0.0.1:59685 2024-12-03T21:18:11,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364750x0, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:11,434 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36475-0x1019e58f9790000 connected 2024-12-03T21:18:11,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:11,519 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31, hbase.cluster.distributed=false 2024-12-03T21:18:11,521 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:11,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36475 2024-12-03T21:18:11,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36475 2024-12-03T21:18:11,525 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36475 2024-12-03T21:18:11,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36475 2024-12-03T21:18:11,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36475 2024-12-03T21:18:11,542 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:18:11,542 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:11,543 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45695 2024-12-03T21:18:11,545 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45695 connecting to ZooKeeper ensemble=127.0.0.1:59685 2024-12-03T21:18:11,545 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456950x0, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:11,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:456950x0, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:11,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45695-0x1019e58f9790001 connected 2024-12-03T21:18:11,557 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:18:11,559 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:18:11,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:18:11,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:11,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-03T21:18:11,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45695 2024-12-03T21:18:11,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45695 2024-12-03T21:18:11,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-03T21:18:11,570 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45695 2024-12-03T21:18:11,582 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:36475 2024-12-03T21:18:11,583 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:11,600 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:11,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,617 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:18:11,618 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,36475,1733260691366 from backup master directory 2024-12-03T21:18:11,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:11,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:11,633 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T21:18:11,633 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,640 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/hbase.id] with ID: 422f05c9-356f-4c11-a671-97641791f44b 2024-12-03T21:18:11,640 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/.tmp/hbase.id 2024-12-03T21:18:11,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:11,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:11,659 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/.tmp/hbase.id]:[hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/hbase.id] 2024-12-03T21:18:11,675 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:11,675 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:18:11,677 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T21:18:11,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:11,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:11,695 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:18:11,696 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:18:11,696 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:11,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:11,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:11,706 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store 2024-12-03T21:18:11,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:11,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:11,713 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:11,713 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:11,713 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:11,714 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:11,714 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:11,714 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:11,714 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:18:11,714 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260691713Disabling compacts and flushes for region at 1733260691713Disabling writes for close at 1733260691714 (+1 ms)Writing region close event to WAL at 1733260691714Closed at 1733260691714 2024-12-03T21:18:11,715 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/.initializing 2024-12-03T21:18:11,715 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,717 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C36475%2C1733260691366, suffix=, logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366, archiveDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/oldWALs, maxLogs=10 2024-12-03T21:18:11,718 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C36475%2C1733260691366.1733260691718 2024-12-03T21:18:11,723 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 2024-12-03T21:18:11,727 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38597:38597),(127.0.0.1/127.0.0.1:44649:44649)] 2024-12-03T21:18:11,728 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:11,728 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:11,728 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,728 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:18:11,733 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:11,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:18:11,736 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:11,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:18:11,746 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:11,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:18:11,749 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:11,750 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,751 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,752 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,753 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,754 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,754 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:18:11,756 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:11,759 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:11,760 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812328, jitterRate=0.03292882442474365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:18:11,761 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260691728Initializing all the Stores at 1733260691729 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260691729Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260691731 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260691731Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260691731Cleaning up temporary data from old regions at 1733260691754 (+23 ms)Region opened successfully at 1733260691760 (+6 ms) 2024-12-03T21:18:11,761 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:18:11,766 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17894381, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:11,767 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:18:11,767 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:18:11,767 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:18:11,767 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:18:11,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:18:11,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:18:11,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:18:11,771 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:18:11,772 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:18:11,781 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:18:11,782 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:18:11,782 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:18:11,789 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:18:11,790 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:18:11,792 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:18:11,798 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:18:11,799 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:18:11,807 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:18:11,811 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:18:11,823 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:18:11,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:11,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:11,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,836 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,36475,1733260691366, sessionid=0x1019e58f9790000, setting cluster-up flag (Was=false) 2024-12-03T21:18:11,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,889 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:18:11,890 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:11,931 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:18:11,933 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,36475,1733260691366 2024-12-03T21:18:11,935 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:18:11,938 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:11,939 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:18:11,939 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:18:11,939 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,36475,1733260691366 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:11,942 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260721943 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:18:11,943 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:18:11,944 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:18:11,944 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:11,944 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:18:11,944 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:18:11,944 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:11,944 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:18:11,944 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:18:11,945 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260691945,5,FailOnTimeoutGroup] 2024-12-03T21:18:11,945 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260691945,5,FailOnTimeoutGroup] 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:11,945 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:11,946 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,946 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:18:11,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:11,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:11,954 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:18:11,954 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31 2024-12-03T21:18:11,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:18:11,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:18:11,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:11,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:11,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:11,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:11,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:11,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:11,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,968 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:11,968 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:11,970 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:11,970 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:11,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:11,972 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(746): ClusterId : 422f05c9-356f-4c11-a671-97641791f44b 2024-12-03T21:18:11,972 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:18:11,973 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:11,973 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:11,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:11,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:11,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740 2024-12-03T21:18:11,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740 2024-12-03T21:18:11,976 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:11,976 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:18:11,977 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T21:18:11,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:11,980 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:11,981 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873891, jitterRate=0.11121039092540741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260691962Initializing all the Stores at 1733260691963 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260691963Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260691964 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260691964Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260691964Cleaning up temporary data from old regions at 1733260691976 (+12 ms)Region opened successfully at 1733260691981 (+5 ms) 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:11,982 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:11,982 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:11,982 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:18:11,982 INFO [PEWorker-1 {}] regionserver.HRegion(1973): 
Closed hbase:meta,,1.1588230740 2024-12-03T21:18:11,982 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260691982Disabling compacts and flushes for region at 1733260691982Disabling writes for close at 1733260691982Writing region close event to WAL at 1733260691982Closed at 1733260691982 2024-12-03T21:18:11,984 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:11,984 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:18:11,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:18:11,986 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:11,987 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:18:11,990 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:18:11,991 DEBUG [RS:0;101545f66cbd:45695 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4606f62b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:12,001 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:45695 2024-12-03T21:18:12,002 INFO [RS:0;101545f66cbd:45695 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:18:12,002 INFO [RS:0;101545f66cbd:45695 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:18:12,002 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T21:18:12,002 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,36475,1733260691366 with port=45695, startcode=1733260691542 2024-12-03T21:18:12,003 DEBUG [RS:0;101545f66cbd:45695 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:18:12,005 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42673, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:18:12,005 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36475 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,005 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36475 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,007 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31 2024-12-03T21:18:12,007 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36575 2024-12-03T21:18:12,007 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:18:12,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:12,015 DEBUG [RS:0;101545f66cbd:45695 {}] zookeeper.ZKUtil(111): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,015 WARN [RS:0;101545f66cbd:45695 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:18:12,015 INFO [RS:0;101545f66cbd:45695 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:12,015 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,015 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,45695,1733260691542] 2024-12-03T21:18:12,019 INFO [RS:0;101545f66cbd:45695 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:18:12,021 INFO [RS:0;101545f66cbd:45695 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:18:12,022 INFO [RS:0;101545f66cbd:45695 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:18:12,022 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:12,022 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:18:12,023 INFO [RS:0;101545f66cbd:45695 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:18:12,023 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,023 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:12,024 DEBUG [RS:0;101545f66cbd:45695 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,025 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,45695,1733260691542-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:12,038 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:18:12,038 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,45695,1733260691542-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,038 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,038 INFO [RS:0;101545f66cbd:45695 {}] regionserver.Replication(171): 101545f66cbd,45695,1733260691542 started 2024-12-03T21:18:12,051 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,051 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,45695,1733260691542, RpcServer on 101545f66cbd/172.17.0.2:45695, sessionid=0x1019e58f9790001 2024-12-03T21:18:12,052 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:18:12,052 DEBUG [RS:0;101545f66cbd:45695 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,052 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,45695,1733260691542' 2024-12-03T21:18:12,052 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:18:12,052 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:18:12,053 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:18:12,053 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:18:12,053 DEBUG [RS:0;101545f66cbd:45695 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,053 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,45695,1733260691542' 2024-12-03T21:18:12,053 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:18:12,054 DEBUG 
[RS:0;101545f66cbd:45695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:18:12,054 DEBUG [RS:0;101545f66cbd:45695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:18:12,054 INFO [RS:0;101545f66cbd:45695 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:18:12,054 INFO [RS:0;101545f66cbd:45695 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:18:12,137 WARN [101545f66cbd:36475 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T21:18:12,157 INFO [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C45695%2C1733260691542, suffix=, logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542, archiveDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs, maxLogs=32 2024-12-03T21:18:12,158 INFO [RS:0;101545f66cbd:45695 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260692158 2024-12-03T21:18:12,166 INFO [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 2024-12-03T21:18:12,168 DEBUG [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38597:38597),(127.0.0.1/127.0.0.1:44649:44649)] 2024-12-03T21:18:12,387 DEBUG [101545f66cbd:36475 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:18:12,389 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,393 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,45695,1733260691542, state=OPENING 2024-12-03T21:18:12,415 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:18:12,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:12,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:12,425 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:12,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:12,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=101545f66cbd,45695,1733260691542}] 2024-12-03T21:18:12,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:12,582 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:18:12,585 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54925, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:18:12,590 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:18:12,590 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:12,593 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C45695%2C1733260691542.meta, suffix=.meta, logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542, archiveDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs, maxLogs=32 2024-12-03T21:18:12,594 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta 2024-12-03T21:18:12,601 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta 2024-12-03T21:18:12,602 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44649:44649),(127.0.0.1/127.0.0.1:38597:38597)] 2024-12-03T21:18:12,603 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:12,603 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:18:12,603 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:18:12,603 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T21:18:12,603 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:18:12,603 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:12,604 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:18:12,604 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:18:12,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:12,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:12,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:12,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:12,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:12,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:12,609 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:12,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:12,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:12,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:12,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:12,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:12,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:12,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:12,611 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:12,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T21:18:12,612 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:12,613 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740 2024-12-03T21:18:12,614 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740 2024-12-03T21:18:12,615 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:12,615 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:18:12,616 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:18:12,617 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:12,618 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805794, jitterRate=0.024620920419692993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:12,618 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:18:12,619 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260692604Writing region info on filesystem at 1733260692604Initializing all the Stores at 1733260692605 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260692605Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260692605Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260692605Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260692605Cleaning up temporary data from old regions at 1733260692615 (+10 ms)Running coprocessor post-open hooks at 1733260692618 (+3 ms)Region opened successfully at 1733260692619 (+1 ms) 2024-12-03T21:18:12,620 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260692582 2024-12-03T21:18:12,623 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:18:12,623 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:18:12,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,625 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,45695,1733260691542, state=OPEN 2024-12-03T21:18:12,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:12,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:12,665 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,45695,1733260691542 2024-12-03T21:18:12,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:12,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:12,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:18:12,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,45695,1733260691542 in 240 msec 2024-12-03T21:18:12,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:18:12,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 686 msec 2024-12-03T21:18:12,676 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:12,676 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:18:12,678 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:12,678 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,45695,1733260691542, seqNum=-1] 2024-12-03T21:18:12,679 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:12,680 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51545, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:12,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 748 msec 2024-12-03T21:18:12,687 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260692687, completionTime=-1 2024-12-03T21:18:12,687 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:18:12,687 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:18:12,689 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:18:12,689 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260752689 2024-12-03T21:18:12,689 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260812689 2024-12-03T21:18:12,689 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:36475, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,690 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,692 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.060sec 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:12,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:18:12,697 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:18:12,697 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:18:12,697 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36475,1733260691366-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
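The master start-up above enables a series of ScheduledChore instances through ChoreService (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, MobFileCleanerChore, and so on), each on a fixed period. A minimal sketch of how such a chore is defined and scheduled with the public ScheduledChore/ChoreService API; the chore name, period, and body here are illustrative only and not taken from this test:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // A trivial chore that runs on a fixed period until its stopper is stopped.
      static class ExampleChore extends ScheduledChore {
        ExampleChore(Stoppable stopper) {
          super("ExampleChore", stopper, 60_000);        // name, stopper, period in ms
        }
        @Override
        protected void chore() {
          // periodic work goes here (a status check, a cleanup pass, ...)
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {            // minimal stopper for the sketch
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        // scheduleChore() emits the "Chore ScheduledChore name=... is enabled." lines seen above.
        service.scheduleChore(new ExampleChore(stopper));
        Thread.sleep(5_000);
        service.shutdown();
      }
    }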
2024-12-03T21:18:12,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103b3390, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:12,773 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,36475,-1 for getting cluster id 2024-12-03T21:18:12,773 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:18:12,777 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '422f05c9-356f-4c11-a671-97641791f44b' 2024-12-03T21:18:12,778 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:18:12,778 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "422f05c9-356f-4c11-a671-97641791f44b" 2024-12-03T21:18:12,779 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f8038e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:12,779 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,36475,-1] 2024-12-03T21:18:12,779 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:18:12,780 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:12,783 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46204, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:18:12,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502f6d00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:12,784 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:12,786 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,45695,1733260691542, seqNum=-1] 2024-12-03T21:18:12,786 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:12,788 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:12,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,36475,1733260691366 2024-12-03T21:18:12,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:12,794 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:18:12,808 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:18:12,808 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:12,809 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39741 2024-12-03T21:18:12,810 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39741 connecting to ZooKeeper ensemble=127.0.0.1:59685 2024-12-03T21:18:12,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:12,812 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:12,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397410x0, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:12,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39741-0x1019e58f9790002 connected 2024-12-03T21:18:12,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-03T21:18:12,832 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-03T21:18:12,833 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:18:12,833 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-12-03T21:18:12,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:18:12,836 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:12,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39741 2024-12-03T21:18:12,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39741 2024-12-03T21:18:12,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39741 2024-12-03T21:18:12,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39741 2024-12-03T21:18:12,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39741 2024-12-03T21:18:12,842 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(746): ClusterId : 422f05c9-356f-4c11-a671-97641791f44b 2024-12-03T21:18:12,843 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:18:12,854 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:18:12,854 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:18:12,865 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:18:12,866 DEBUG [RS:1;101545f66cbd:39741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe73e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:12,880 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;101545f66cbd:39741 2024-12-03T21:18:12,881 INFO [RS:1;101545f66cbd:39741 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:18:12,881 INFO [RS:1;101545f66cbd:39741 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:18:12,881 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(832): About to register with Master. 
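The RS:1 entries around this point trace a second region server being brought up inside the same mini cluster and preparing to register with the master. A minimal sketch of that test-side flow, assuming the 2.x-style HBaseTestingUtility API (the HBaseTestingUtil referenced in these logs is its 3.x successor); the method names are assumptions to check against the version in use, not code taken from this test:

    import org.apache.hadoop.hbase.HBaseTestingUtility;   // renamed HBaseTestingUtil in HBase 3.x

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);                          // mini DFS + ZooKeeper + master + one region server
        // Add a second region server; it reports for duty to the master,
        // matching the RS:1 registration entries in this log.
        util.getMiniHBaseCluster().startRegionServer();
        try {
          // ... run the test workload against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }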
2024-12-03T21:18:12,882 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,36475,1733260691366 with port=39741, startcode=1733260692808 2024-12-03T21:18:12,882 DEBUG [RS:1;101545f66cbd:39741 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:18:12,884 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:18:12,884 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36475 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,885 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36475 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,886 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31 2024-12-03T21:18:12,886 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36575 2024-12-03T21:18:12,886 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:18:12,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:12,896 DEBUG [RS:1;101545f66cbd:39741 {}] zookeeper.ZKUtil(111): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,896 WARN [RS:1;101545f66cbd:39741 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:18:12,896 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,39741,1733260692808] 2024-12-03T21:18:12,896 INFO [RS:1;101545f66cbd:39741 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:12,896 DEBUG [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,900 INFO [RS:1;101545f66cbd:39741 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:18:12,903 INFO [RS:1;101545f66cbd:39741 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:18:12,904 INFO [RS:1;101545f66cbd:39741 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:18:12,904 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:12,904 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:18:12,906 INFO [RS:1;101545f66cbd:39741 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:18:12,906 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,906 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,907 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:12,907 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:12,907 DEBUG [RS:1;101545f66cbd:39741 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,909 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39741,1733260692808-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:12,923 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:18:12,923 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39741,1733260692808-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,923 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,923 INFO [RS:1;101545f66cbd:39741 {}] regionserver.Replication(171): 101545f66cbd,39741,1733260692808 started 2024-12-03T21:18:12,936 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:12,937 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,39741,1733260692808, RpcServer on 101545f66cbd/172.17.0.2:39741, sessionid=0x1019e58f9790002 2024-12-03T21:18:12,937 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:18:12,937 DEBUG [RS:1;101545f66cbd:39741 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;101545f66cbd:39741,5,FailOnTimeoutGroup] 2024-12-03T21:18:12,937 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39741,1733260692808' 2024-12-03T21:18:12,937 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:18:12,937 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-03T21:18:12,937 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:18:12,938 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
101545f66cbd,39741,1733260692808 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39741,1733260692808' 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:18:12,938 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:18:12,939 DEBUG [RS:1;101545f66cbd:39741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:18:12,939 INFO [RS:1;101545f66cbd:39741 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:18:12,939 INFO [RS:1;101545f66cbd:39741 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:18:12,939 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 101545f66cbd,36475,1733260691366 2024-12-03T21:18:12,939 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ff67fa3 2024-12-03T21:18:12,939 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T21:18:12,963 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46214, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T21:18:12,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T21:18:12,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
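The two TableDescriptorChecker warnings above stem from the deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) carried by this test table's descriptor, so that flushes and log rolls trigger quickly. A minimal client-side sketch of an equivalent create call with the standard Admin/TableDescriptorBuilder API; this is an illustration under those assumptions, not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setMaxFileSize(786432)        // tiny split size -> the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192)    // tiny flush size -> the MEMSTORE_FLUSHSIZE warning above
              .build();
          admin.createTable(td);             // drives the CreateTableProcedure (pid=4) that follows
        }
      }
    }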
2024-12-03T21:18:12,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:18:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T21:18:12,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T21:18:12,967 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:12,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-03T21:18:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:18:12,969 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T21:18:12,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741835_1011 (size=393) 2024-12-03T21:18:12,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741835_1011 (size=393) 2024-12-03T21:18:12,978 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8c36c7ec95832727c0c6dc4110c8f353, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31 2024-12-03T21:18:12,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39781 is added to blk_1073741836_1012 (size=76) 2024-12-03T21:18:12,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39985 is added to blk_1073741836_1012 (size=76) 2024-12-03T21:18:12,987 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:12,987 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8c36c7ec95832727c0c6dc4110c8f353, disabling compactions & flushes 2024-12-03T21:18:12,987 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:12,987 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:12,987 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. after waiting 0 ms 2024-12-03T21:18:12,987 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:12,988 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:12,988 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8c36c7ec95832727c0c6dc4110c8f353: Waiting for close lock at 1733260692987Disabling compacts and flushes for region at 1733260692987Disabling writes for close at 1733260692987Writing region close event to WAL at 1733260692988 (+1 ms)Closed at 1733260692988 2024-12-03T21:18:12,989 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T21:18:12,990 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733260692990"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260692990"}]},"ts":"1733260692990"} 2024-12-03T21:18:12,993 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
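The Put just logged writes the new region's info:regioninfo and info:state cells into hbase:meta. A minimal sketch of reading that row back through the client API, reusing the row key from the entry above; the class name and printout are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowSketch {
      public static void main(String[] args) throws Exception {
        // Row key copied from the Put logged above.
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Result r = meta.get(new Get(row));
          RegionInfo ri = RegionInfo.parseFrom(
              r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
          String state = Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, Bytes.toBytes("state")));
          System.out.println(ri.getRegionNameAsString() + " state=" + state);
        }
      }
    }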
2024-12-03T21:18:12,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T21:18:12,995 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260692994"}]},"ts":"1733260692994"} 2024-12-03T21:18:12,997 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-03T21:18:12,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8c36c7ec95832727c0c6dc4110c8f353, ASSIGN}] 2024-12-03T21:18:12,999 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8c36c7ec95832727c0c6dc4110c8f353, ASSIGN 2024-12-03T21:18:13,001 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8c36c7ec95832727c0c6dc4110c8f353, ASSIGN; state=OFFLINE, location=101545f66cbd,45695,1733260691542; forceNewPlan=false, retain=false 2024-12-03T21:18:13,042 INFO [RS:1;101545f66cbd:39741 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C39741%2C1733260692808, suffix=, logDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808, archiveDir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs, maxLogs=32 2024-12-03T21:18:13,043 INFO [RS:1;101545f66cbd:39741 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39741%2C1733260692808.1733260693042 2024-12-03T21:18:13,050 INFO [RS:1;101545f66cbd:39741 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 2024-12-03T21:18:13,054 DEBUG [RS:1;101545f66cbd:39741 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38597:38597),(127.0.0.1/127.0.0.1:44649:44649)] 2024-12-03T21:18:13,153 INFO [101545f66cbd:36475 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
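The WAL configuration line above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) follows from a few region server settings: the roll size is the WAL block size multiplied by the log-roll multiplier. A minimal configuration sketch, assuming the long-standing property names; the exact keys are assumptions to verify against the version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // roll at blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                          // cap on un-archived WAL files
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("WAL rolls at roughly " + rollSize + " bytes");      // 128 MB with the values above
      }
    }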
2024-12-03T21:18:13,153 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8c36c7ec95832727c0c6dc4110c8f353, regionState=OPENING, regionLocation=101545f66cbd,45695,1733260691542 2024-12-03T21:18:13,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8c36c7ec95832727c0c6dc4110c8f353, ASSIGN because future has completed 2024-12-03T21:18:13,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c36c7ec95832727c0c6dc4110c8f353, server=101545f66cbd,45695,1733260691542}] 2024-12-03T21:18:13,320 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:13,321 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8c36c7ec95832727c0c6dc4110c8f353, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:13,322 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,322 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:13,322 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,322 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,324 INFO [StoreOpener-8c36c7ec95832727c0c6dc4110c8f353-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,326 INFO [StoreOpener-8c36c7ec95832727c0c6dc4110c8f353-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c36c7ec95832727c0c6dc4110c8f353 columnFamilyName info 2024-12-03T21:18:13,326 DEBUG [StoreOpener-8c36c7ec95832727c0c6dc4110c8f353-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:13,327 INFO [StoreOpener-8c36c7ec95832727c0c6dc4110c8f353-1 {}] regionserver.HStore(327): Store=8c36c7ec95832727c0c6dc4110c8f353/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:13,327 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,329 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,329 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,330 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,330 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,331 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,334 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:13,334 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8c36c7ec95832727c0c6dc4110c8f353; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855776, jitterRate=0.08817608654499054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:18:13,334 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:13,335 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8c36c7ec95832727c0c6dc4110c8f353: Running coprocessor pre-open hook at 1733260693322Writing region info on filesystem at 1733260693322Initializing all the Stores at 1733260693324 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260693324Cleaning up temporary data from old regions at 1733260693330 (+6 ms)Running coprocessor post-open hooks at 1733260693334 (+4 ms)Region opened successfully at 1733260693335 (+1 ms) 2024-12-03T21:18:13,336 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353., pid=6, masterSystemTime=1733260693315 2024-12-03T21:18:13,338 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:13,338 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:13,339 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8c36c7ec95832727c0c6dc4110c8f353, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,45695,1733260691542 2024-12-03T21:18:13,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c36c7ec95832727c0c6dc4110c8f353, server=101545f66cbd,45695,1733260691542 because future has completed 2024-12-03T21:18:13,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T21:18:13,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8c36c7ec95832727c0c6dc4110c8f353, server=101545f66cbd,45695,1733260691542 in 183 msec 2024-12-03T21:18:13,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T21:18:13,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8c36c7ec95832727c0c6dc4110c8f353, ASSIGN in 347 msec 2024-12-03T21:18:13,349 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T21:18:13,350 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260693349"}]},"ts":"1733260693349"} 2024-12-03T21:18:13,352 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-03T21:18:13,354 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T21:18:13,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 390 msec 2024-12-03T21:18:13,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T21:18:13,707 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-03T21:18:13,709 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T21:18:18,105 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:18:18,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:18,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:18,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:18,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:18,133 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-03T21:18:23,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36475 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:18:23,046 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-03T21:18:23,046 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-03T21:18:23,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T21:18:23,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:23,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:23,070 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:23,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:23,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:23,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:23,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21f35bd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:23,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bf0ac18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:23,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77d9f90{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-37571-hadoop-hdfs-3_4_1-tests_jar-_-any-569213492439811780/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:23,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24a7a164{HTTP/1.1, (http/1.1)}{localhost:37571} 2024-12-03T21:18:23,167 INFO [Time-limited test {}] server.Server(415): Started @116265ms 2024-12-03T21:18:23,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:23,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:23,205 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:23,206 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:23,206 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:23,206 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:23,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b812bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:23,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3517eda1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:23,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7072e3f6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-44435-hadoop-hdfs-3_4_1-tests_jar-_-any-3610707166228218282/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:23,301 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7bbb3640{HTTP/1.1, (http/1.1)}{localhost:44435} 2024-12-03T21:18:23,302 INFO [Time-limited test {}] server.Server(415): Started @116399ms 2024-12-03T21:18:23,303 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:23,339 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:23,341 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:23,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:23,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:23,342 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:23,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71980077{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:23,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2181a18f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:23,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46598eee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-34313-hadoop-hdfs-3_4_1-tests_jar-_-any-3666737270490698295/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:23,438 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d2926be{HTTP/1.1, (http/1.1)}{localhost:34313} 2024-12-03T21:18:23,438 INFO [Time-limited test {}] server.Server(415): Started @116536ms 2024-12-03T21:18:23,439 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:24,225 WARN [Thread-859 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,226 WARN [Thread-860 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,240 WARN [Thread-801 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ccbd07e9b3107de with lease ID 0x4bbc22489a7baa52: Processing first storage report for DS-96f79e41-4560-4ad8-bd94-6ac121270bfd from datanode DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ccbd07e9b3107de with lease ID 0x4bbc22489a7baa52: from storage DS-96f79e41-4560-4ad8-bd94-6ac121270bfd node DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ccbd07e9b3107de with lease ID 0x4bbc22489a7baa52: Processing first storage report for DS-fe28671f-2877-490e-a007-0c98a31e9b11 from datanode DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ccbd07e9b3107de with lease ID 0x4bbc22489a7baa52: from storage DS-fe28671f-2877-490e-a007-0c98a31e9b11 node DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,446 WARN [Thread-871 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data7/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,446 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data8/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,466 WARN [Thread-823 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c9ef637b2b86b35 with lease ID 0x4bbc22489a7baa53: Processing first storage report for DS-a05bc428-fee0-49f9-83de-bea8137d8ce4 from datanode DatanodeRegistration(127.0.0.1:46411, datanodeUuid=5a2af4c0-c083-4bce-b6ea-5c7248671f8c, infoPort=46635, infoSecurePort=0, ipcPort=36611, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c9ef637b2b86b35 with lease ID 0x4bbc22489a7baa53: from storage DS-a05bc428-fee0-49f9-83de-bea8137d8ce4 node DatanodeRegistration(127.0.0.1:46411, datanodeUuid=5a2af4c0-c083-4bce-b6ea-5c7248671f8c, infoPort=46635, infoSecurePort=0, ipcPort=36611, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c9ef637b2b86b35 with lease ID 0x4bbc22489a7baa53: Processing first storage report for DS-df062e1b-1dcb-435a-852e-6f7def771779 from datanode DatanodeRegistration(127.0.0.1:46411, datanodeUuid=5a2af4c0-c083-4bce-b6ea-5c7248671f8c, infoPort=46635, infoSecurePort=0, ipcPort=36611, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c9ef637b2b86b35 with lease ID 0x4bbc22489a7baa53: from storage DS-df062e1b-1dcb-435a-852e-6f7def771779 node DatanodeRegistration(127.0.0.1:46411, datanodeUuid=5a2af4c0-c083-4bce-b6ea-5c7248671f8c, infoPort=46635, infoSecurePort=0, ipcPort=36611, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,560 WARN [Thread-882 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data9/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,560 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data10/current/BP-698693125-172.17.0.2-1733260689316/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:24,577 WARN [Thread-845 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:24,579 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc79d65a84446a6ef with lease ID 0x4bbc22489a7baa54: Processing first storage report for DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9 from datanode DatanodeRegistration(127.0.0.1:39079, datanodeUuid=ea0232a6-4b98-40b1-b129-57633c15357a, infoPort=33501, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc79d65a84446a6ef with lease ID 0x4bbc22489a7baa54: from storage DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9 node DatanodeRegistration(127.0.0.1:39079, datanodeUuid=ea0232a6-4b98-40b1-b129-57633c15357a, infoPort=33501, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc79d65a84446a6ef with lease ID 0x4bbc22489a7baa54: Processing first storage report for DS-a53f4c9e-6dab-43b6-b277-8904d8ba14d2 from datanode DatanodeRegistration(127.0.0.1:39079, datanodeUuid=ea0232a6-4b98-40b1-b129-57633c15357a, infoPort=33501, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316) 2024-12-03T21:18:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc79d65a84446a6ef with lease ID 0x4bbc22489a7baa54: from storage DS-a53f4c9e-6dab-43b6-b277-8904d8ba14d2 node DatanodeRegistration(127.0.0.1:39079, datanodeUuid=ea0232a6-4b98-40b1-b129-57633c15357a, infoPort=33501, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:24,667 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,668 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
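Note on the BlockPoolSlice warnings above: "dfsUsed file missing ... will proceed with Du for space computation calculation" means the datanode found no cached usage value for that volume, so it falls back to walking the block-pool directory and summing file sizes, the way `du` would. Below is a minimal, self-contained sketch of that fallback idea; the class name and the use of `java.nio.file` are illustrative, this is not Hadoop's CachingGetSpaceUsed/DU implementation.

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

/** Illustrative fallback: with no cached dfsUsed value, walk the tree and sum file sizes. */
public class DuFallback {
    static long usedBytes(Path blockPoolDir) throws IOException {
        try (Stream<Path> files = Files.walk(blockPoolDir)) {
            return files.filter(Files::isRegularFile)
                        .mapToLong(p -> {
                            try {
                                return Files.size(p);
                            } catch (IOException e) {
                                throw new UncheckedIOException(e);
                            }
                        })
                        .sum();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(usedBytes(Path.of(args[0])) + " bytes used");
    }
}
```

On a large volume this walk is the expensive part, which is why the computed value is normally cached in the dfsUsed file between restarts.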
2024-12-03T21:18:24,668 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,669 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 block BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:24,668 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,668 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:24,668 WARN [PacketResponder: BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39985] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,668 WARN [PacketResponder: BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39985] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,668 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta block BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 
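The "Error Recovery ... datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,...]) is bad" entries above show the HDFS client side of the failure: the DataStreamer drops the datanode that returned a bad ack from the write pipeline and keeps writing on the survivors. A toy sketch of that shape follows, with hypothetical names and no Hadoop dependencies; the real logic lives in org.apache.hadoop.hdfs.DataStreamer.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative only: drop the bad node from the pipeline, abort when none remain. */
public class PipelineRecoverySketch {
    static List<String> recover(List<String> pipeline, int badIndex) {
        List<String> survivors = new ArrayList<>(pipeline);
        String bad = survivors.remove(badIndex);           // drop the node that returned a bad ack
        System.out.println("Error Recovery: datanode " + badIndex + " (" + bad + ") is bad.");
        if (survivors.isEmpty()) {
            // mirrors "All datanodes [...] are bad. Aborting..." further down in this log
            throw new IllegalStateException("All datanodes are bad. Aborting...");
        }
        return survivors;                                   // writing continues on the remaining nodes
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:39781", "127.0.0.1:39985");
        System.out.println("continuing with " + recover(pipeline, 1));
    }
}
```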
2024-12-03T21:18:24,669 WARN [PacketResponder: BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39985] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,670 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:24,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-88828206_22 at /127.0.0.1:48432 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48432 dst: /127.0.0.1:39781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:36104 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36104 dst: /127.0.0.1:39985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
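The "Premature EOF from inputStream" DataXceiver errors above are the receiving datanode's view of the same event: the writer goes away mid-packet, so a fixed-length read cannot complete. A minimal illustration of that readFully failure mode (not Hadoop's IOUtils.readFully, just the same idea):

```java
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

/** Minimal illustration of the "Premature EOF from inputStream" failure mode above. */
public class PrematureEofSketch {
    static void readFully(InputStream in, byte[] buf) throws IOException {
        int off = 0;
        while (off < buf.length) {
            int n = in.read(buf, off, buf.length - off);
            if (n < 0) {
                // the peer closed the connection before a full packet arrived
                throw new EOFException("Premature EOF from inputStream");
            }
            off += n;
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] packet = new byte[16];
        try {
            readFully(new ByteArrayInputStream(new byte[4]), packet); // only 4 of 16 bytes available
        } catch (EOFException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}
```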
2024-12-03T21:18:24,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:48368 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48368 dst: /127.0.0.1:39781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:36092 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36092 dst: /127.0.0.1:39985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:48412 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48412 dst: /127.0.0.1:39781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cc9fc90{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:24,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-88828206_22 at /127.0.0.1:36140 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36140 dst: /127.0.0.1:39985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:48396 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48396 dst: /127.0.0.1:39781 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:24,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:36066 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36066 dst: /127.0.0.1:39985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
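The companion errors on 127.0.0.1:39985 above fail instead with java.nio.channels.ClosedChannelException thrown from AbstractSelectableChannel.register: by the time the read path tries to wait on the socket, its channel has already been closed. That exact exception is easy to reproduce standalone:

```java
import java.nio.channels.ClosedChannelException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

/** Reproduces the ClosedChannelException pattern above: registering an already-closed channel. */
public class ClosedChannelSketch {
    public static void main(String[] args) throws Exception {
        try (Selector selector = Selector.open()) {
            SocketChannel ch = SocketChannel.open();
            ch.configureBlocking(false);
            ch.close();                                    // the owner has already shut the channel
            try {
                ch.register(selector, SelectionKey.OP_READ);
            } catch (ClosedChannelException e) {
                // matches the top frame AbstractSelectableChannel.register(...) in the log
                System.out.println("caught ClosedChannelException");
            }
        }
    }
}
```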
2024-12-03T21:18:24,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7909691f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:24,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:24,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d227195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:24,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f186434{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:24,676 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:24,676 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T21:18:24,676 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid cbf1eb3c-64f4-4d13-a033-84daec941fc5) service to localhost/127.0.0.1:36575 2024-12-03T21:18:24,676 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:24,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data3/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:24,677 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data4/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:24,678 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:24,678 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,678 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,678 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@ec83c08 {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing unknown operation src: /127.0.0.1:56840 dst: /127.0.0.1:39781 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:56836 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56836 dst: /127.0.0.1:39781 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
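During the datanode shutdowns above, the per-volume refreshUsed threads log "Thread Interrupted waiting to refresh disk information: sleep interrupted"; they are simply interrupted out of their sleep and exit. A small stand-alone sketch of such a background refresher and its interrupt-driven shutdown (the names and the sleep interval are made up, this is not CachingGetSpaceUsed):

```java
/** Illustrative background refresher, mirroring the CachingGetSpaceUsed$RefreshThread warnings above. */
public class RefreshThreadSketch {
    public static void main(String[] args) throws InterruptedException {
        Thread refresher = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    // pretend to re-measure disk usage, then sleep until the next refresh
                    Thread.sleep(600_000);
                } catch (InterruptedException e) {
                    System.out.println("Thread Interrupted waiting to refresh disk information: "
                            + e.getMessage());
                    Thread.currentThread().interrupt();    // preserve interrupt status and exit the loop
                }
            }
        }, "refreshUsed-sketch");
        refresher.start();
        Thread.sleep(100);
        refresher.interrupt();                             // the shutdown path seen in the log
        refresher.join();
    }
}
```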
2024-12-03T21:18:24,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-88828206_22 at /127.0.0.1:56838 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39781:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56838 dst: /127.0.0.1:39781 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:24,679 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 block BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
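Once shutdown has begun, the datanode also turns away new work: the entries just above reject incoming operations with "Server closed." (DataXceiverServer.addPeer) and "The stream is closed". The gist of that guard, in illustrative form only:

```java
import java.util.concurrent.atomic.AtomicBoolean;

/** Tiny illustration of the "Server closed." rejection above: after shutdown starts, new peers are refused. */
public class ClosingServerSketch {
    private final AtomicBoolean closed = new AtomicBoolean(false);

    void addPeer(String peer) {
        if (closed.get()) {
            // mirrors "error processing unknown operation ... Server closed."
            throw new IllegalStateException("Server closed.");
        }
        System.out.println("accepted " + peer);
    }

    public static void main(String[] args) {
        ClosingServerSketch server = new ClosingServerSketch();
        server.addPeer("/127.0.0.1:56840");
        server.closed.set(true);
        try {
            server.addPeer("/127.0.0.1:56841");
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```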
2024-12-03T21:18:24,679 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta block BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@527a79d3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:24,683 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78ac40e9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:24,683 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:24,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ed9c958{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:24,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ad2b72e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:24,684 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:24,684 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:24,684 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:24,684 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid e8625039-e36f-45c3-8b0a-40844ce3dcaf) service to localhost/127.0.0.1:36575 2024-12-03T21:18:24,685 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data1/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:24,685 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data2/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:24,685 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:24,689 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353., hostname=101545f66cbd,45695,1733260691542, seqNum=2] 2024-12-03T21:18:24,691 ERROR [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31-prefix:101545f66cbd,45695,1733260691542 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,691 WARN [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31-prefix:101545f66cbd,45695,1733260691542 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,692 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C45695%2C1733260691542:(num 1733260692158) roll requested 2024-12-03T21:18:24,692 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260704692 2024-12-03T21:18:24,695 WARN [Thread-896 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,695 WARN [Thread-896 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:24,695 WARN [Thread-896 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741838_1018 2024-12-03T21:18:24,698 WARN [Thread-896 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:24,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:24,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:24,705 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:24,705 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:24,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:24,705 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 2024-12-03T21:18:24,705 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,706 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:24,706 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33501:33501),(127.0.0.1/127.0.0.1:46635:46635)] 2024-12-03T21:18:24,706 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:24,707 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-03T21:18:24,707 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-03T21:18:24,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 2024-12-03T21:18:24,710 WARN [IPC Server handler 1 on default port 36575 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-12-03T21:18:24,713 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 after 5ms 2024-12-03T21:18:24,908 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:25,697 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:26,706 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:26,708 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 2024-12-03T21:18:26,708 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:26,709 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 block BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:26,709 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:52820 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:39079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52820 dst: /127.0.0.1:39079 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:26,709 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:46714 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:46411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46714 dst: /127.0.0.1:46411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:26,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46598eee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:26,712 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d2926be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:26,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:26,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2181a18f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:26,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71980077{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:26,714 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:26,714 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:26,714 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid ea0232a6-4b98-40b1-b129-57633c15357a) service to localhost/127.0.0.1:36575 2024-12-03T21:18:26,714 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:26,715 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data9/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:26,715 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data10/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:26,716 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:26,909 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:27,698 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:28,707 WARN [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]] 2024-12-03T21:18:28,708 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:28,708 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C45695%2C1733260691542:(num 1733260704692) roll requested 2024-12-03T21:18:28,709 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260708708 2024-12-03T21:18:28,714 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:28,714 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 
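[Editor's aside] The "Connection refused" and "datanode ... is bad" entries above are the DFS client failing to rebuild its write pipeline because the test has already stopped those datanodes. The HDFS-side knobs that govern this recovery are the standard replace-datanode-on-failure client settings; a minimal, illustrative sketch (the output path and the chosen values are placeholders, not taken from this test) might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineFailureTuning {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Standard HDFS client keys controlling what the DFSClient does when a
        // datanode in the write pipeline fails; the values here are examples only.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        FileSystem fs = FileSystem.get(conf);
        // Placeholder path; with best-effort enabled the write keeps going even if
        // no replacement datanode can be found, instead of failing the stream.
        try (FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"), true)) {
          out.writeBytes("probe\n");
          out.hflush(); // pushes the data through whatever pipeline is currently live
        }
      }
    }
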
2024-12-03T21:18:28,714 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741840_1022 2024-12-03T21:18:28,714 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 after 4007ms 2024-12-03T21:18:28,715 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:28,716 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:28,717 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 
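[Editor's aside] The "Recover lease on dfs file ..." / "Failed to recover lease, attempt=N ... after 4007ms" entries are HBase's RecoverLeaseFSUtils polling the NameNode until it releases the lease on the old WAL so the file can be closed. A rough standalone equivalent of that loop, assuming a plain HDFS client and using a placeholder path and backoff interval, would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryProbe {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("lease recovery only applies to HDFS");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path wal = new Path(args[0]); // e.g. the old WAL file that is "not closed yet"

        // recoverLease() returns true once the NameNode has closed the file; until
        // then we poll, mirroring the attempt=0, attempt=1 ... entries in the log.
        boolean recovered = false;
        for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
          recovered = dfs.recoverLease(wal);
          if (!recovered) {
            Thread.sleep(4000L); // placeholder backoff between attempts
          }
        }
        System.out.println("lease recovered: " + recovered);
      }
    }
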
2024-12-03T21:18:28,717 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741841_1023 2024-12-03T21:18:28,717 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:28,721 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T21:18:28,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:28,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:28,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:28,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:28,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:28,729 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260708708 2024-12-03T21:18:28,730 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45031:45031),(127.0.0.1/127.0.0.1:46635:46635)] 2024-12-03T21:18:28,730 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:28,730 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 is not closed yet, will try archiving it next time 2024-12-03T21:18:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46411 is added to blk_1073741839_1021 (size=3600) 2024-12-03T21:18:28,909 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
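[Editor's aside] The "Rolled WAL ... new WAL ..." entry above is driven by the region server's own logRoller thread. For completeness, a client can also ask a region server to roll its WAL; if I recall the Admin API correctly the call is rollWALWriter(ServerName). A hedged sketch, with the server name taken as a command-line placeholder (in this log it would be 101545f66cbd,45695,1733260691542):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalFromClient {
      public static void main(String[] args) throws Exception {
        // Expects "host,port,startcode", e.g. 101545f66cbd,45695,1733260691542.
        ServerName rs = ServerName.valueOf(args[0]);
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the region server to close its current WAL and open a new one,
          // the same "roll requested" path the logRoller takes in the log above.
          admin.rollWALWriter(rs);
        }
      }
    }
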
2024-12-03T21:18:29,133 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:29,698 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,725 WARN [ResponseProcessor for block BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,725 WARN [DataStreamer for file /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260708708 block BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:30,725 WARN [PacketResponder: BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46411] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:30,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43314 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43314 dst: /127.0.0.1:36867 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:30,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:46734 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46734 dst: /127.0.0.1:46411 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:30,730 WARN [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]] 2024-12-03T21:18:30,731 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,731 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C45695%2C1733260691542:(num 1733260708708) roll requested 2024-12-03T21:18:30,732 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260710731 2024-12-03T21:18:30,735 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,736 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:30,736 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741843_1026 2024-12-03T21:18:30,737 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:30,738 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,739 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:30,739 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741844_1027 2024-12-03T21:18:30,739 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:30,741 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,741 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:30,741 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741845_1028 2024-12-03T21:18:30,742 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:30,745 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39781 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43338 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029 to mirror 127.0.0.1:39781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:30,745 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:30,745 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029 2024-12-03T21:18:30,745 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43338 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T21:18:30,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43338 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43338 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:30,746 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:30,747 WARN [IPC Server handler 2 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:30,747 WARN [IPC Server handler 2 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:30,747 WARN [IPC Server handler 2 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:30,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7072e3f6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:30,748 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7bbb3640{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:30,748 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:30,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3517eda1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:30,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b812bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:30,750 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:30,750 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
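[Editor's aside] The NameNode's "Failed to place enough replicas, still in need of 1 to reach 2" warnings are expected once the test has taken enough datanodes down that replication=2 placement cannot succeed. An illustrative way to check how many live datanodes remain (assuming fs.defaultFS points at this mini cluster) is the datanode report:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class LiveDatanodeCount {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        // The LIVE report lists only datanodes still heartbeating to the NameNode;
        // once this drops below the requested replication (2 in the log above),
        // "Failed to place enough replicas" warnings are the expected outcome.
        DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
        System.out.println("live datanodes: " + live.length);
        for (DatanodeInfo dn : live) {
          System.out.println("  " + dn.getXferAddr());
        }
      }
    }
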
2024-12-03T21:18:30,750 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:30,750 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid 5a2af4c0-c083-4bce-b6ea-5c7248671f8c) service to localhost/127.0.0.1:36575 2024-12-03T21:18:30,750 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data7/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:30,751 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data8/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:30,751 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:30,751 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:30,751 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:30,751 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:30,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:30,752 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:30,752 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260708708 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260710731 2024-12-03T21:18:30,753 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45031:45031)] 2024-12-03T21:18:30,753 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:30,753 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260708708 is not closed yet, will try archiving it next time 2024-12-03T21:18:30,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741842_1025 (size=93) 2024-12-03T21:18:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45695 {}] regionserver.HRegion(8855): Flush requested on 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:30,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 
column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:18:30,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/424ec8301993470db811b6ff3f86aea2 is 1080, key is row0002/info:/1733260706717/Put/seqid=0 2024-12-03T21:18:30,782 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,782 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:30,782 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741848_1031 2024-12-03T21:18:30,783 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:30,784 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
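[Editor's aside] The "Flush requested on 8c36c7ec95832727c0c6dc4110c8f353" / "Flushing ... 1/1 column families" entries are the region writing its memstore out to the .tmp HFile shown above. From a client the same flush can be forced after a few puts; in this sketch the qualifier "q" and the row values are made up, while the table and family names match the ones visible in the log paths:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        byte[] family = Bytes.toBytes("info"); // family seen in the HFile path above
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Rows like row0002 in the log; each Put lands in the memstore first.
          for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes(String.format("row%04d", i)));
            p.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            table.put(p);
          }
          // Explicitly flush the memstore to an HFile, the step logged above as
          // "Flushing ... 1/1 column families".
          admin.flush(tn);
        }
      }
    }
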
2024-12-03T21:18:30,784 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:30,784 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741849_1032 2024-12-03T21:18:30,784 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:30,786 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:30,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43348 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033 to mirror 127.0.0.1:46411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:30,787 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:30,787 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033 2024-12-03T21:18:30,787 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43348 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:30,787 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43348 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43348 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:30,787 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:30,788 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:30,789 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:30,789 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741851_1034 2024-12-03T21:18:30,789 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:30,790 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:30,790 WARN [IPC Server handler 3 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:30,790 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:30,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741852_1035 (size=10347) 2024-12-03T21:18:30,910 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:31,156 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:31,157 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260708708 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs/101545f66cbd%2C45695%2C1733260691542.1733260708708 2024-12-03T21:18:31,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/424ec8301993470db811b6ff3f86aea2 2024-12-03T21:18:31,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/424ec8301993470db811b6ff3f86aea2 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2 2024-12-03T21:18:31,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2, entries=5, sequenceid=11, filesize=10.1 K 2024-12-03T21:18:31,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8c36c7ec95832727c0c6dc4110c8f353 in 450ms, sequenceid=11, compaction requested=false 2024-12-03T21:18:31,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:31,259 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@13c7678a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741852_1035 to 127.0.0.1:39079 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,259 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fb17874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741842_1025 to 127.0.0.1:39985 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45695 {}] regionserver.HRegion(8855): Flush requested on 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:31,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-03T21:18:31,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/63c85944ae564c3cbdca37cf9eb4e0c0 is 1080, key is row0007/info:/1733260710761/Put/seqid=0 2024-12-03T21:18:31,395 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:31,395 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:31,395 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741853_1036 2024-12-03T21:18:31,396 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:31,398 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43368 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037 to mirror 127.0.0.1:39985 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,398 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39985 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:31,399 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43368 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:31,399 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:31,399 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037 2024-12-03T21:18:31,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43368 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43368 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,400 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:31,402 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:31,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43372 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038 to mirror 127.0.0.1:46411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,402 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:31,402 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43372 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:31,402 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038 2024-12-03T21:18:31,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43372 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43372 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:31,403 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:31,404 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:31,404 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 
2024-12-03T21:18:31,404 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741856_1039 2024-12-03T21:18:31,405 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:31,406 WARN [IPC Server handler 2 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:31,406 WARN [IPC Server handler 2 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:31,406 WARN [IPC Server handler 2 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:31,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741857_1040 (size=12506) 2024-12-03T21:18:31,699 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:31,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/63c85944ae564c3cbdca37cf9eb4e0c0 2024-12-03T21:18:31,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/63c85944ae564c3cbdca37cf9eb4e0c0 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0 2024-12-03T21:18:31,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0, entries=7, sequenceid=24, filesize=12.2 K 2024-12-03T21:18:31,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8c36c7ec95832727c0c6dc4110c8f353 in 444ms, sequenceid=24, compaction requested=false 2024-12-03T21:18:31,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:31,832 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-03T21:18:31,832 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:31,832 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0 because midkey is the same as first or last row 2024-12-03T21:18:32,753 WARN [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]] 2024-12-03T21:18:32,753 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:32,754 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C45695%2C1733260691542:(num 1733260710731) roll requested 2024-12-03T21:18:32,754 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260712754 2024-12-03T21:18:32,758 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,759 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:32,759 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741858_1041 2024-12-03T21:18:32,760 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:32,762 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:32,762 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:32,762 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741859_1042 2024-12-03T21:18:32,763 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:32,765 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,765 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:32,765 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741860_1043 2024-12-03T21:18:32,766 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:32,770 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39781 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:32,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43402 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044 to mirror 127.0.0.1:39781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:32,770 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:32,770 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044 2024-12-03T21:18:32,770 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43402 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T21:18:32,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43402 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43402 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:32,771 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:32,772 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:32,772 WARN [IPC Server handler 3 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:32,772 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:32,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:32,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:32,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:32,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:32,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:32,775 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260710731 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260712754 2024-12-03T21:18:32,776 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45031:45031)] 2024-12-03T21:18:32,776 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 
2024-12-03T21:18:32,776 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260710731 is not closed yet, will try archiving it next time 2024-12-03T21:18:32,777 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260704692 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs/101545f66cbd%2C45695%2C1733260691542.1733260704692 2024-12-03T21:18:32,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741847_1030 (size=24823) 2024-12-03T21:18:32,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45695 {}] regionserver.HRegion(8855): Flush requested on 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:32,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T21:18:32,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/8176a5042f4b418fb5b17147d437afeb is 1079, key is tmprow/info:/1733260712809/Put/seqid=0 2024-12-03T21:18:32,817 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,817 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 
2024-12-03T21:18:32,817 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741863_1046 2024-12-03T21:18:32,818 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:32,820 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39985 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,820 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43432 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047 to mirror 127.0.0.1:39985 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:32,820 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 
2024-12-03T21:18:32,820 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43432 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:32,820 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047 2024-12-03T21:18:32,820 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43432 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43432 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:32,820 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:32,821 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,822 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 
2024-12-03T21:18:32,822 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741865_1048 2024-12-03T21:18:32,822 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:32,823 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:32,823 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:32,823 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741866_1049 2024-12-03T21:18:32,824 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:32,824 WARN [IPC Server handler 1 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:32,824 WARN [IPC Server handler 1 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:32,824 WARN [IPC Server handler 1 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:32,827 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741867_1050 (size=6027) 2024-12-03T21:18:32,910 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:33,180 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 is not closed yet, will try archiving it next time 2024-12-03T21:18:33,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/8176a5042f4b418fb5b17147d437afeb 2024-12-03T21:18:33,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/8176a5042f4b418fb5b17147d437afeb as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb 2024-12-03T21:18:33,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb, entries=1, sequenceid=34, filesize=5.9 K 2024-12-03T21:18:33,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8c36c7ec95832727c0c6dc4110c8f353 in 436ms, sequenceid=34, compaction requested=true 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0 
because midkey is the same as first or last row 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c36c7ec95832727c0c6dc4110c8f353:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:18:33,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:18:33,247 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:18:33,248 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:18:33,248 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HStore(1541): 8c36c7ec95832727c0c6dc4110c8f353/info is initiating minor compaction (all files) 2024-12-03T21:18:33,248 INFO [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8c36c7ec95832727c0c6dc4110c8f353/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:33,249 INFO [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb] into tmpdir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp, totalSize=28.2 K 2024-12-03T21:18:33,249 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] compactions.Compactor(225): Compacting 424ec8301993470db811b6ff3f86aea2, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733260706717 2024-12-03T21:18:33,250 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63c85944ae564c3cbdca37cf9eb4e0c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733260710761 2024-12-03T21:18:33,250 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8176a5042f4b418fb5b17147d437afeb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733260712809 2024-12-03T21:18:33,266 INFO [RS:0;101545f66cbd:45695-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c36c7ec95832727c0c6dc4110c8f353#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:18:33,267 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/6ee052dc2fd74cd791653bab1d534eff is 1080, key is row0002/info:/1733260706717/Put/seqid=0 2024-12-03T21:18:33,268 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:33,269 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:33,269 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741868_1051 2024-12-03T21:18:33,269 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:33,270 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
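The entries above show the HDFS client's create-path recovery loop while several of the test's datanodes are down: each createBlockOutputStream attempt fails with Connection refused, the streamer abandons the allocated block, excludes the unreachable datanode, and asks the NameNode for a fresh block until BlockPlacementPolicyDefault can no longer find two DISK replicas. A minimal, hedged sketch of how a test can provoke the same pattern is below; MiniDFSCluster is Hadoop's in-process test cluster, and the path, byte counts, and node counts are illustrative assumptions rather than values taken from this run.

```java
// Sketch only: provokes "Connection refused" pipeline failures similar to the ones logged above.
// MiniDFSCluster is Hadoop's test-only in-process cluster; the file path and sizes are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2);                      // mirrors the replication=2 seen in the log
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(4)
        .build();
    try {
      FileSystem fs = cluster.getFileSystem();
      FSDataOutputStream out = fs.create(new Path("/sketch/wal.tmp"));
      out.write(new byte[4096]);
      out.hflush();                                         // pins a write pipeline to specific datanodes
      cluster.stopDataNode(0);                              // simulate a datanode death mid-write
      out.write(new byte[4096]);                            // later writes now trigger pipeline recovery
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}
```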
2024-12-03T21:18:33,270 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:33,271 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741869_1052 2024-12-03T21:18:33,271 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:33,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43462 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053 to mirror 127.0.0.1:39079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:33,273 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39079 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:33,274 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43462 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:33,274 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:33,274 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053 2024-12-03T21:18:33,274 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:43462 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43462 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:33,274 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:33,276 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
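The DataXceiver errors just above show a second failure shape: the first datanode in the pipeline (127.0.0.1:36867) accepts the block but cannot reach its downstream mirror, so it acks ERROR with firstBadLink set and the client blames the mirror (the "datanode 1 ... is bad" entries), whereas a direct Connection refused from the client blames datanode 0. A toy sketch of that decision, purely illustrative and not HDFS code:

```java
// Toy illustration (not HBase/HDFS code): which pipeline node the client marks bad.
// A failed connect blames the first node (index 0); an ack carrying firstBadLink blames the node
// named in the ack, which in the entries above is the downstream mirror (index 1).
final class BadNodeChoiceSketch {
  static int blame(boolean connectFailed, int firstBadLinkIndex) {
    return connectFailed ? 0 : firstBadLinkIndex;
  }
}
```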
2024-12-03T21:18:33,276 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:33,276 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741871_1054 2024-12-03T21:18:33,277 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:33,277 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:33,277 WARN [IPC Server handler 3 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:33,278 WARN [IPC Server handler 3 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:33,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741872_1055 (size=17994) 2024-12-03T21:18:33,697 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/6ee052dc2fd74cd791653bab1d534eff as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff 2024-12-03T21:18:33,699 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:33,704 INFO [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8c36c7ec95832727c0c6dc4110c8f353/info of 8c36c7ec95832727c0c6dc4110c8f353 into 6ee052dc2fd74cd791653bab1d534eff(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:18:33,704 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:33,704 INFO [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353., storeName=8c36c7ec95832727c0c6dc4110c8f353/info, priority=13, startTime=1733260713246; duration=0sec 2024-12-03T21:18:33,704 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T21:18:33,704 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff because midkey is the same as first or last row 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff because midkey is the same as first or last row 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff because midkey is the same as first or last row 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-03T21:18:33,705 DEBUG [RS:0;101545f66cbd:45695-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c36c7ec95832727c0c6dc4110c8f353:info 2024-12-03T21:18:34,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T21:18:34,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45695 {}] regionserver.HRegion(8855): Flush requested on 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:34,243 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fb17874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741857_1040 to 127.0.0.1:39985 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:34,243 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@13c7678a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741847_1030 to 127.0.0.1:39985 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
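The flush, compaction, and split-check messages above trace one complete store cycle for 8c36c7ec95832727c0c6dc4110c8f353/info: a 7.35 KB memstore flush, a minor compaction of three HFiles into one 17.6 K file, and then repeated split-policy checks that pass the size test (sumSize above the 16.0 K sizeToCheck) but bail out because the biggest file's midkey equals its first or last row. The sketch below is a simplified restatement of those two checks using the logged values; it is not the actual HBase split-policy code.

```java
// Illustrative only: a simplified version of the checks logged by the split policy above.
// Real logic lives in ConstantSizeRegionSplitPolicy / IncreasingToUpperBoundRegionSplitPolicy and
// StoreUtils; the 16 KB constant just mirrors the "sizeToCheck=16.0 K" printed in this test run.
final class SplitCheckSketch {
  private static final long SIZE_TO_CHECK = 16L * 1024;

  // The region is a split candidate once summed store file size crosses the threshold...
  static boolean sizeSaysSplit(long sumStoreFileBytes) {
    return sumStoreFileBytes > SIZE_TO_CHECK;
  }

  // ...but a split still needs a usable midkey: if the biggest file's midpoint equals its first or
  // last row (the "cannot split ... because midkey" message), there is no boundary to split on.
  static boolean canUseMidkey(byte[] midkey, byte[] firstRow, byte[] lastRow) {
    return !java.util.Arrays.equals(midkey, firstRow) && !java.util.Arrays.equals(midkey, lastRow);
  }
}
```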
2024-12-03T21:18:34,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/551c395b90614e99a915554bce203deb is 1079, key is tmprow/info:/1733260714236/Put/seqid=0 2024-12-03T21:18:34,246 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:34,246 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]) is bad. 2024-12-03T21:18:34,246 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741873_1056 2024-12-03T21:18:34,247 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39985,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK] 2024-12-03T21:18:34,249 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:34,249 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:34,249 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741874_1057 2024-12-03T21:18:34,250 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:34,253 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39781 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:34,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:39286 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058 to mirror 127.0.0.1:39781 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:34,253 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]) is bad. 2024-12-03T21:18:34,253 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:39286 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:34,253 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058 2024-12-03T21:18:34,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:39286 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39286 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:34,254 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK] 2024-12-03T21:18:34,255 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:34,256 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 2024-12-03T21:18:34,256 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741876_1059 2024-12-03T21:18:34,256 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:34,257 WARN [IPC Server handler 4 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T21:18:34,258 WARN [IPC Server handler 4 on default port 36575 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T21:18:34,258 WARN [IPC Server handler 4 on default port 36575 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T21:18:34,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741877_1060 (size=6027) 2024-12-03T21:18:34,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/551c395b90614e99a915554bce203deb 2024-12-03T21:18:34,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/551c395b90614e99a915554bce203deb as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb 2024-12-03T21:18:34,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb, entries=1, sequenceid=45, filesize=5.9 K 2024-12-03T21:18:34,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8c36c7ec95832727c0c6dc4110c8f353 in 440ms, sequenceid=45, compaction requested=false 2024-12-03T21:18:34,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:34,678 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-03T21:18:34,678 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:34,678 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff because midkey is the same as first or last row 2024-12-03T21:18:34,777 WARN [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-03T21:18:34,777 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:34,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:34,860 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:34,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:34,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:34,861 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:18:34,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c598caf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:34,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d3b3ece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:34,911 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:34,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f470d88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/java.io.tmpdir/jetty-localhost-41601-hadoop-hdfs-3_4_1-tests_jar-_-any-1523924792818169158/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:34,953 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@261adbd7{HTTP/1.1, (http/1.1)}{localhost:41601} 2024-12-03T21:18:34,953 INFO [Time-limited test {}] server.Server(415): Started @128051ms 2024-12-03T21:18:34,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
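The Jetty startup above (ServerConnector on localhost:41601, "Started @128051ms") is the test bringing a replacement DataNode online after the earlier deaths; the block reports that follow in the log are that node registering its storages with the NameNode. A hedged sketch of the stop-and-restart step, using the same MiniDFSCluster utility assumed in the earlier sketch; exact method overloads can differ between Hadoop versions.

```java
// Sketch only (same MiniDFSCluster assumptions as the earlier block): stop one datanode and bring a
// replacement back up, which corresponds to the Jetty DataNode web server start and the block
// reports that follow in this log.
import org.apache.hadoop.hdfs.MiniDFSCluster;

final class DatanodeRestartSketch {
  static void killAndReplace(MiniDFSCluster cluster, int dnIndex) throws Exception {
    MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(dnIndex); // the "death" phase
    cluster.restartDataNode(stopped); // replacement registers, then sends block reports like those above
    cluster.waitActive();             // block until the restarted datanode is serving again
  }
}
```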
2024-12-03T21:18:35,243 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@13c7678a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741867_1050 to 127.0.0.1:39781 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:35,243 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fb17874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36867, datanodeUuid=f23cfe64-c25c-42a3-9017-a7eff60f0915, infoPort=45031, infoSecurePort=0, ipcPort=37773, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741872_1055 to 127.0.0.1:46411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:35,327 WARN [Thread-971 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:35,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x363d86b133935bf6 with lease ID 0x4bbc22489a7baa55: from storage DS-c933cce1-4c78-4076-82ad-cc6d66d06618 node DatanodeRegistration(127.0.0.1:43547, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=33649, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:35,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x363d86b133935bf6 with lease ID 0x4bbc22489a7baa55: from storage DS-15d26117-d0e6-4d32-99c4-a1282fe98f6e node DatanodeRegistration(127.0.0.1:43547, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=33649, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:35,699 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:36,778 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:36,911 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:37,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741877_1060 (size=6027) 2024-12-03T21:18:37,700 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:38,778 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:38,911 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:39,700 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
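By this point the regionserver and master WAL rollers keep logging "All datanodes ... are bad. Aborting..." from the append/recovery path (setupPipelineForAppendOrRecovery) because the old WAL's pipeline has shrunk to a single datanode (127.0.0.1:39781) that is unreachable, so each roller retries until it can open a new WAL file on healthy nodes. The sketch below only illustrates the standard hdfs-client keys that govern datanode replacement on that path; the values are examples, not this test's configuration, and once every node in an existing pipeline is dead the stream aborts regardless of these settings.

```java
// Hedged illustration only: standard HDFS client keys for the append/recovery path seen in the
// stack traces above. Example values, not taken from this test run.
import org.apache.hadoop.conf.Configuration;

final class PipelineRecoveryConfigSketch {
  static Configuration relaxedRecovery() {
    Configuration conf = new Configuration();
    // Whether the client tries to add a replacement datanode when a pipeline node fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT / ALWAYS / NEVER control how aggressively a replacement is requested.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    return conf;
  }
}
```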
2024-12-03T21:18:40,779 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:40,912 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,341 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T21:18:41,701 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,944 ERROR [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData-prefix:101545f66cbd,36475,1733260691366 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,944 WARN [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData-prefix:101545f66cbd,36475,1733260691366 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,944 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C36475%2C1733260691366:(num 1733260691718) roll requested 2024-12-03T21:18:41,944 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C36475%2C1733260691366.1733260721944 2024-12-03T21:18:41,947 WARN [Thread-992 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,948 WARN [Thread-992 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK]) is bad. 
2024-12-03T21:18:41,948 WARN [Thread-992 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741878_1061 2024-12-03T21:18:41,949 WARN [Thread-992 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39079,DS-d9f14fe1-296a-4d65-a4ba-846dfb4113e9,DISK] 2024-12-03T21:18:41,952 WARN [Thread-992 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,952 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:39298 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062 to mirror 127.0.0.1:46411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:41,952 WARN [Thread-992 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 
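The pipeline-recovery warnings above (a datanode marked bad, the block abandoned, the node excluded, and a new pipeline attempted) are governed on the client side by the standard HDFS replace-datanode-on-failure settings. The following is a minimal, illustrative sketch of those client properties only; the concrete values are assumptions for illustration and are not taken from this test run's configuration.

import org.apache.hadoop.conf.Configuration;

// Illustrative only: client-side settings that influence how the DFSClient reacts
// when a datanode in the write pipeline is marked bad, as in the recovery attempts above.
public final class PipelineRecoveryConfSketch {
  public static Configuration sketch() {
    Configuration conf = new Configuration();
    // Try to replace a failed datanode in the pipeline rather than aborting the write outright.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If no replacement datanode can be found, keep writing with the remaining ones.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.best-effort", "true");
    return conf;
  }
}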
2024-12-03T21:18:41,952 WARN [Thread-992 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062 2024-12-03T21:18:41,952 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:39298 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T21:18:41,952 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1864487884_22 at /127.0.0.1:39298 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39298 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:41,953 WARN [Thread-992 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:41,960 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:41,960 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:41,961 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:41,961 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:41,961 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:41,961 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260721944 2024-12-03T21:18:41,961 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,962 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:41,962 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 2024-12-03T21:18:41,962 WARN [IPC Server handler 3 on default port 36575 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-12-03T21:18:41,962 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 after 0ms 2024-12-03T21:18:41,963 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33649:33649),(127.0.0.1/127.0.0.1:45031:45031)] 2024-12-03T21:18:41,963 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 is not closed yet, will try archiving it next time 2024-12-03T21:18:42,779 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
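The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 0ms", with the NameNode reporting "Lease recovery is in progress") poll the NameNode until the previous writer's lease on the old WAL is released. Below is a minimal sketch of that polling pattern using the public DistributedFileSystem.recoverLease() call with an assumed backoff; it approximates the behaviour visible in the log rather than reproducing the HBase utility itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative only: poll recoverLease() until the NameNode reports the file closed.
public final class LeaseRecoverySketch {
  public static boolean recoverLease(Configuration conf, Path walFile, long timeoutMs)
      throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long start = System.currentTimeMillis();
    int attempt = 0;
    while (System.currentTimeMillis() - start < timeoutMs) {
      // recoverLease() returns true once the previous writer's lease is released
      // and the file is closed; false while recovery is still in progress.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      attempt++;
      // Back off between attempts, mirroring the "attempt=N ... after Xms" log lines above.
      Thread.sleep(Math.min(4000L, 1000L * attempt));
    }
    return false;
  }
}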
2024-12-03T21:18:42,912 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:44,780 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:44,913 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:45,348 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52c27844 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39781,null,null]) java.net.ConnectException: Call From 101545f66cbd/172.17.0.2 to localhost:33003 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-03T21:18:45,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741833_1020 (size=455) 2024-12-03T21:18:45,736 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260692158 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs/101545f66cbd%2C45695%2C1733260691542.1733260692158 2024-12-03T21:18:45,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260710731 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs/101545f66cbd%2C45695%2C1733260691542.1733260710731 2024-12-03T21:18:45,963 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/WALs/101545f66cbd,36475,1733260691366/101545f66cbd%2C36475%2C1733260691366.1733260691718 after 4001ms 2024-12-03T21:18:46,333 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@635a0fe5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43547, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=33649, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741833_1020 to 127.0.0.1:46411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:46,780 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
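The WAL-Archive entries above move fully rolled WALs into the oldWALs directory once they carry no un-flushed edits. A roll (and the archiving that eventually follows) can also be requested explicitly through the Admin API; the sketch below is illustrative, with the server name taken from the log paths above rather than from any real deployment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: request a WAL roll on one region server; the old WAL becomes
// eligible for archiving to oldWALs once it holds no un-flushed edits.
public final class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name format is "host,port,startcode", as seen in the WAL paths above.
      ServerName rs = ServerName.valueOf("101545f66cbd,45695,1733260691542");
      admin.rollWALWriter(rs);
    }
  }
}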
2024-12-03T21:18:46,913 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:48,781 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:48,914 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:50,546 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.1733260730545 2024-12-03T21:18:50,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,558 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260712754 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260730545 2024-12-03T21:18:50,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45031:45031),(127.0.0.1/127.0.0.1:33649:33649)] 2024-12-03T21:18:50,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741862_1045 (size=13591) 2024-12-03T21:18:50,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260712754 is not closed yet, will try archiving it next time 2024-12-03T21:18:50,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45695 {}] regionserver.HRegion(8855): Flush requested on 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:50,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T21:18:50,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/c965a6f362cd4a6eaeaca237c87cb605 is 1080, key is row0013/info:/1733260730562/Put/seqid=0 2024-12-03T21:18:50,579 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
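The MemStoreFlusher entries above show a requested flush of the single "info" column family being written out to a new HFile under .tmp and then committed. The same flush can be requested programmatically through the Admin API; the sketch below is illustrative only, reusing the test table name that appears in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: ask the region servers to flush a table's memstores, which
// produces "Flushing ... column families" and "Added ..." log lines like the ones above.
public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}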
2024-12-03T21:18:50,580 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:43547,DS-c933cce1-4c78-4076-82ad-cc6d66d06618,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:50,580 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741882_1066 2024-12-03T21:18:50,580 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:50,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741883_1067 (size=11421) 2024-12-03T21:18:50,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741883_1067 (size=11421) 2024-12-03T21:18:50,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/c965a6f362cd4a6eaeaca237c87cb605 2024-12-03T21:18:50,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/c965a6f362cd4a6eaeaca237c87cb605 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605 2024-12-03T21:18:50,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605, entries=6, sequenceid=55, filesize=11.2 K 2024-12-03T21:18:50,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 8c36c7ec95832727c0c6dc4110c8f353 in 29ms, sequenceid=55, compaction requested=true 2024-12-03T21:18:50,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:50,602 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-03T21:18:50,602 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:50,602 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff because midkey is the same as first or last row 2024-12-03T21:18:50,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8c36c7ec95832727c0c6dc4110c8f353:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:18:50,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:18:50,602 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:18:50,603 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:18:50,603 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HStore(1541): 8c36c7ec95832727c0c6dc4110c8f353/info is initiating minor compaction (all files) 2024-12-03T21:18:50,603 INFO [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8c36c7ec95832727c0c6dc4110c8f353/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:50,603 INFO [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605] into tmpdir=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp, totalSize=34.6 K 2024-12-03T21:18:50,604 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] compactions.Compactor(225): Compacting 6ee052dc2fd74cd791653bab1d534eff, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733260706717 2024-12-03T21:18:50,604 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] compactions.Compactor(225): Compacting 551c395b90614e99a915554bce203deb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733260714236 2024-12-03T21:18:50,605 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] compactions.Compactor(225): Compacting c965a6f362cd4a6eaeaca237c87cb605, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733260714647 2024-12-03T21:18:50,622 INFO [RS:0;101545f66cbd:45695-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c36c7ec95832727c0c6dc4110c8f353#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:18:50,623 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/184954e9cb424a399505794945f6068c is 1080, key is row0002/info:/1733260706717/Put/seqid=0 2024-12-03T21:18:50,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741884_1068 (size=23502) 2024-12-03T21:18:50,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741884_1068 (size=23502) 2024-12-03T21:18:50,635 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/184954e9cb424a399505794945f6068c as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/184954e9cb424a399505794945f6068c 2024-12-03T21:18:50,642 INFO [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8c36c7ec95832727c0c6dc4110c8f353/info of 8c36c7ec95832727c0c6dc4110c8f353 into 184954e9cb424a399505794945f6068c(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:18:50,642 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8c36c7ec95832727c0c6dc4110c8f353: 2024-12-03T21:18:50,643 INFO [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353., storeName=8c36c7ec95832727c0c6dc4110c8f353/info, priority=13, startTime=1733260730602; duration=0sec 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/184954e9cb424a399505794945f6068c because midkey is the same as first or last row 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/184954e9cb424a399505794945f6068c because midkey is the same as first or last row 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/184954e9cb424a399505794945f6068c because midkey is the same as first or last row 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:18:50,643 DEBUG [RS:0;101545f66cbd:45695-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c36c7ec95832727c0c6dc4110c8f353:info 2024-12-03T21:18:50,781 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-03T21:18:50,781 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:18:50,787 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T21:18:50,787 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:50,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:50,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:50,787 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
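The call stack above shows the minicluster shutdown being driven from AbstractTestLogRolling.tearDown() through HBaseTestingUtil.shutdownMiniCluster(). The sketch below is an assumed JUnit 4 skeleton of that lifecycle shape, not the actual test class.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Illustrative only: the setup/teardown pattern implied by the call stack above.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // starts mini DFS, ZooKeeper and HBase cluster
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster(); // closes connections, stops region servers and master
  }
}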
2024-12-03T21:18:50,787 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:18:50,788 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=900664206, stopped=false 2024-12-03T21:18:50,788 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,36475,1733260691366 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:50,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:50,813 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:50,814 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T21:18:50,814 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:50,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:50,815 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:50,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:50,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:50,815 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,45695,1733260691542' ***** 2024-12-03T21:18:50,815 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:18:50,815 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,39741,1733260692808' ***** 2024-12-03T21:18:50,816 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:18:50,816 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:18:50,816 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:18:50,816 INFO [RS:0;101545f66cbd:45695 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:18:50,816 INFO [RS:0;101545f66cbd:45695 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:18:50,817 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(3091): Received CLOSE for 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:50,817 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:18:50,817 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:18:50,817 INFO [RS:1;101545f66cbd:39741 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:18:50,817 INFO [RS:1;101545f66cbd:39741 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:18:50,817 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,39741,1733260692808 2024-12-03T21:18:50,817 INFO [RS:1;101545f66cbd:39741 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:50,817 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,45695,1733260691542 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:50,818 INFO [RS:1;101545f66cbd:39741 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;101545f66cbd:39741. 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:45695. 
2024-12-03T21:18:50,818 DEBUG [RS:1;101545f66cbd:39741 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:50,818 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8c36c7ec95832727c0c6dc4110c8f353, disabling compactions & flushes 2024-12-03T21:18:50,818 DEBUG [RS:1;101545f66cbd:39741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:50,818 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 
2024-12-03T21:18:50,818 DEBUG [RS:0;101545f66cbd:45695 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:18:50,818 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:50,818 DEBUG [RS:0;101545f66cbd:45695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:50,818 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,39741,1733260692808; all regions closed. 2024-12-03T21:18:50,818 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. after waiting 0 ms 2024-12-03T21:18:50,818 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T21:18:50,818 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8c36c7ec95832727c0c6dc4110c8f353 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-03T21:18:50,818 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:18:50,819 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,820 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T21:18:50,820 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,820 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8c36c7ec95832727c0c6dc4110c8f353=TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.} 2024-12-03T21:18:50,820 DEBUG [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c36c7ec95832727c0c6dc4110c8f353 2024-12-03T21:18:50,820 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:50,820 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,820 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:18:50,820 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:50,820 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,820 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:50,820 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,820 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:50,820 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-03T21:18:50,820 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,821 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,821 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 2024-12-03T21:18:50,821 ERROR [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31-prefix:101545f66cbd,45695,1733260691542.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,821 WARN [FSHLog-0-hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31-prefix:101545f66cbd,45695,1733260691542.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,821 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C45695%2C1733260691542.meta:.meta(num 1733260692594) roll requested 2024-12-03T21:18:50,821 WARN [IPC Server handler 0 on default port 36575 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 has not been closed. Lease recovery is in progress. 
RecoveryId = 1069 for block blk_1073741837_1013 2024-12-03T21:18:50,821 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C45695%2C1733260691542.meta.1733260730821.meta 2024-12-03T21:18:50,821 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 after 0ms 2024-12-03T21:18:50,824 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/dadad822d301473191a49df5f7e5275e is 1080, key is row0018/info:/1733260730574/Put/seqid=0 2024-12-03T21:18:50,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741885_1070 (size=11421) 2024-12-03T21:18:50,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741885_1070 (size=11421) 2024-12-03T21:18:50,831 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,831 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:50,832 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260730821.meta 2024-12-03T21:18:50,832 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/dadad822d301473191a49df5f7e5275e 2024-12-03T21:18:50,832 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:18:50,832 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39781,DS-17c1eafd-226f-4ce0-9a08-6ae23a9af18c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta 2024-12-03T21:18:50,832 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45031:45031),(127.0.0.1/127.0.0.1:33649:33649)] 2024-12-03T21:18:50,832 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta is not closed yet, will try archiving it next time 2024-12-03T21:18:50,832 WARN [IPC Server handler 2 on default port 36575 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1072 for block blk_1073741834_1010 2024-12-03T21:18:50,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta after 1ms 2024-12-03T21:18:50,838 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/.tmp/info/dadad822d301473191a49df5f7e5275e as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/dadad822d301473191a49df5f7e5275e 2024-12-03T21:18:50,845 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/dadad822d301473191a49df5f7e5275e, entries=6, sequenceid=65, filesize=11.2 K 2024-12-03T21:18:50,846 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 8c36c7ec95832727c0c6dc4110c8f353 in 28ms, sequenceid=65, compaction requested=false 2024-12-03T21:18:50,846 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605] to archive 2024-12-03T21:18:50,847 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/info/559e392b5471484487d06d5b01411610 is 203, key is 
TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353./info:regioninfo/1733260693339/Put/seqid=0 2024-12-03T21:18:50,848 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T21:18:50,849 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/424ec8301993470db811b6ff3f86aea2 2024-12-03T21:18:50,850 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33430 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073 to mirror 127.0.0.1:46411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:50,850 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33430 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:50,850 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:50,850 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073 2024-12-03T21:18:50,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33430 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741887_1073] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33430 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:50,851 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/63c85944ae564c3cbdca37cf9eb4e0c0 2024-12-03T21:18:50,851 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:50,852 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/6ee052dc2fd74cd791653bab1d534eff 2024-12-03T21:18:50,853 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/8176a5042f4b418fb5b17147d437afeb 2024-12-03T21:18:50,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741888_1074 (size=7089) 2024-12-03T21:18:50,855 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/551c395b90614e99a915554bce203deb 2024-12-03T21:18:50,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741888_1074 (size=7089) 2024-12-03T21:18:50,855 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/info/559e392b5471484487d06d5b01411610 2024-12-03T21:18:50,856 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/info/c965a6f362cd4a6eaeaca237c87cb605 2024-12-03T21:18:50,857 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=101545f66cbd:36475 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-03T21:18:50,857 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [424ec8301993470db811b6ff3f86aea2=10347, 63c85944ae564c3cbdca37cf9eb4e0c0=12506, 6ee052dc2fd74cd791653bab1d534eff=17994, 8176a5042f4b418fb5b17147d437afeb=6027, 551c395b90614e99a915554bce203deb=6027, c965a6f362cd4a6eaeaca237c87cb605=11421] 2024-12-03T21:18:50,861 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8c36c7ec95832727c0c6dc4110c8f353/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-03T21:18:50,862 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:50,862 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8c36c7ec95832727c0c6dc4110c8f353: Waiting for close lock at 1733260730818Running coprocessor pre-close hooks at 1733260730818Disabling compacts and flushes for region at 1733260730818Disabling writes for close at 1733260730818Obtaining lock to block concurrent updates at 1733260730818Preparing flush snapshotting stores in 8c36c7ec95832727c0c6dc4110c8f353 at 1733260730818Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733260730819 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 
at 1733260730820 (+1 ms)Flushing 8c36c7ec95832727c0c6dc4110c8f353/info: creating writer at 1733260730820Flushing 8c36c7ec95832727c0c6dc4110c8f353/info: appending metadata at 1733260730823 (+3 ms)Flushing 8c36c7ec95832727c0c6dc4110c8f353/info: closing flushed file at 1733260730823Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53a08da7: reopening flushed file at 1733260730837 (+14 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 8c36c7ec95832727c0c6dc4110c8f353 in 28ms, sequenceid=65, compaction requested=false at 1733260730846 (+9 ms)Writing region close event to WAL at 1733260730858 (+12 ms)Running coprocessor post-close hooks at 1733260730862 (+4 ms)Closed at 1733260730862 2024-12-03T21:18:50,863 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733260692964.8c36c7ec95832727c0c6dc4110c8f353. 2024-12-03T21:18:50,874 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/ns/a5534c9fc8634da6b994b704a21bed37 is 43, key is default/ns:d/1733260692681/Put/seqid=0 2024-12-03T21:18:50,877 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,877 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33442 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6]'}, localName='127.0.0.1:36867', datanodeUuid='f23cfe64-c25c-42a3-9017-a7eff60f0915', xmitsInProgress=0}:Exception transferring block BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075 to mirror 127.0.0.1:46411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:50,877 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK], DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 2024-12-03T21:18:50,877 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075 2024-12-03T21:18:50,877 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33442 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T21:18:50,877 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1114377622_22 at /127.0.0.1:33442 [Receiving block BP-698693125-172.17.0.2-1733260689316:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:36867:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33442 dst: /127.0.0.1:36867 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:50,877 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:50,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741890_1076 (size=5153) 2024-12-03T21:18:50,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741890_1076 (size=5153) 2024-12-03T21:18:50,882 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/ns/a5534c9fc8634da6b994b704a21bed37 2024-12-03T21:18:50,901 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/table/d87aa55439944501b350c82cba4aa8d1 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733260693349/Put/seqid=0 2024-12-03T21:18:50,902 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:18:50,902 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-698693125-172.17.0.2-1733260689316:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK], DatanodeInfoWithStorage[127.0.0.1:36867,DS-96f79e41-4560-4ad8-bd94-6ac121270bfd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK]) is bad. 
2024-12-03T21:18:50,903 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-698693125-172.17.0.2-1733260689316:blk_1073741891_1077 2024-12-03T21:18:50,903 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46411,DS-a05bc428-fee0-49f9-83de-bea8137d8ce4,DISK] 2024-12-03T21:18:50,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741892_1078 (size=5424) 2024-12-03T21:18:50,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741892_1078 (size=5424) 2024-12-03T21:18:50,908 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/table/d87aa55439944501b350c82cba4aa8d1 2024-12-03T21:18:50,909 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T21:18:50,909 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T21:18:50,911 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:50,913 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/info/559e392b5471484487d06d5b01411610 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/info/559e392b5471484487d06d5b01411610 2024-12-03T21:18:50,919 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/info/559e392b5471484487d06d5b01411610, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T21:18:50,920 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/ns/a5534c9fc8634da6b994b704a21bed37 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/ns/a5534c9fc8634da6b994b704a21bed37 2024-12-03T21:18:50,926 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/ns/a5534c9fc8634da6b994b704a21bed37, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T21:18:50,928 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/.tmp/table/d87aa55439944501b350c82cba4aa8d1 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/table/d87aa55439944501b350c82cba4aa8d1 2024-12-03T21:18:50,933 INFO 
[RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/table/d87aa55439944501b350c82cba4aa8d1, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T21:18:50,934 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false 2024-12-03T21:18:50,939 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T21:18:50,939 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:18:50,939 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:50,940 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260730820Running coprocessor pre-close hooks at 1733260730820Disabling compacts and flushes for region at 1733260730820Disabling writes for close at 1733260730820Obtaining lock to block concurrent updates at 1733260730820Preparing flush snapshotting stores in 1588230740 at 1733260730820Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733260730821 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733260730833 (+12 ms)Flushing 1588230740/info: creating writer at 1733260730833Flushing 1588230740/info: appending metadata at 1733260730847 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733260730847Flushing 1588230740/ns: creating writer at 1733260730861 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733260730874 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733260730874Flushing 1588230740/table: creating writer at 1733260730888 (+14 ms)Flushing 1588230740/table: appending metadata at 1733260730900 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733260730900Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c4fc867: reopening flushed file at 1733260730912 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24a396dc: reopening flushed file at 1733260730919 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55e7d069: reopening flushed file at 1733260730927 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false at 1733260730934 (+7 ms)Writing region close event to WAL at 1733260730935 (+1 ms)Running coprocessor post-close hooks at 1733260730939 (+4 ms)Closed at 1733260730939 2024-12-03T21:18:50,940 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:50,961 INFO [WAL-Archive-0 {}] 
wal.AbstractFSWAL(968): Archiving hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.1733260712754 to hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs/101545f66cbd%2C45695%2C1733260691542.1733260712754 2024-12-03T21:18:51,020 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,45695,1733260691542; all regions closed. 2024-12-03T21:18:51,021 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:51,021 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:51,021 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:51,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:51,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:51,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741886_1071 (size=825) 2024-12-03T21:18:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741886_1071 (size=825) 2024-12-03T21:18:51,026 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T21:18:51,026 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T21:18:52,027 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:52,332 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@635a0fe5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43547, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=33649, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741836_1012 to 127.0.0.1:46411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:52,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:18:52,725 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T21:18:52,725 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-03T21:18:53,333 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@32ed870b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43547, datanodeUuid=cbf1eb3c-64f4-4d13-a033-84daec941fc5, infoPort=33649, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=1541511070;c=1733260689316):Failed to transfer BP-698693125-172.17.0.2-1733260689316:blk_1073741828_1004 to 127.0.0.1:46411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:53,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:53,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T21:18:53,708 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:18:53,708 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T21:18:54,822 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 after 4001ms 2024-12-03T21:18:54,834 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta after 4002ms 2024-12-03T21:18:55,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741862_1045 (size=13591) 2024-12-03T21:18:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:55,353 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b5ee87b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block 
(block=BP-698693125-172.17.0.2-1733260689316:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:39781,null,null]) java.net.ConnectException: Call From 101545f66cbd/172.17.0.2 to localhost:33003 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T21:18:55,821 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T21:18:55,824 DEBUG [RS:1;101545f66cbd:39741 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs 2024-12-03T21:18:55,824 INFO [RS:1;101545f66cbd:39741 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C39741%2C1733260692808:(num 1733260693042) 2024-12-03T21:18:55,824 DEBUG [RS:1;101545f66cbd:39741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:55,824 INFO [RS:1;101545f66cbd:39741 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:55,825 INFO [RS:1;101545f66cbd:39741 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:55,825 INFO [RS:1;101545f66cbd:39741 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:55,825 INFO [RS:1;101545f66cbd:39741 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:18:55,825 INFO [RS:1;101545f66cbd:39741 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:18:55,825 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T21:18:55,825 INFO [RS:1;101545f66cbd:39741 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:18:55,826 INFO [RS:1;101545f66cbd:39741 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:55,826 INFO [RS:1;101545f66cbd:39741 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39741 2024-12-03T21:18:55,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:55,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,39741,1733260692808 2024-12-03T21:18:55,837 INFO [RS:1;101545f66cbd:39741 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:55,845 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,39741,1733260692808] 2024-12-03T21:18:55,853 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,39741,1733260692808 already deleted, retry=false 2024-12-03T21:18:55,853 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,39741,1733260692808 expired; onlineServers=1 2024-12-03T21:18:55,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,875 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:55,945 INFO [RS:1;101545f66cbd:39741 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:55,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:55,946 INFO [RS:1;101545f66cbd:39741 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,39741,1733260692808; zookeeper connection closed. 
2024-12-03T21:18:55,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39741-0x1019e58f9790002, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:55,946 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4655b128 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4655b128 2024-12-03T21:18:56,022 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T21:18:56,030 DEBUG [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs 2024-12-03T21:18:56,030 INFO [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C45695%2C1733260691542.meta:.meta(num 1733260730821) 2024-12-03T21:18:56,031 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,031 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,031 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,031 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,032 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741881_1065 (size=15140) 2024-12-03T21:18:56,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741881_1065 (size=15140) 2024-12-03T21:18:56,036 DEBUG [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/oldWALs 2024-12-03T21:18:56,036 INFO [RS:0;101545f66cbd:45695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C45695%2C1733260691542:(num 1733260730545) 2024-12-03T21:18:56,036 DEBUG [RS:0;101545f66cbd:45695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:56,036 INFO [RS:0;101545f66cbd:45695 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:18:56,036 INFO [RS:0;101545f66cbd:45695 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:56,036 INFO [RS:0;101545f66cbd:45695 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:56,036 INFO [RS:0;101545f66cbd:45695 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:56,036 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:18:56,037 INFO [RS:0;101545f66cbd:45695 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45695 2024-12-03T21:18:56,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,45695,1733260691542 2024-12-03T21:18:56,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:56,060 INFO [RS:0;101545f66cbd:45695 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:56,070 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,45695,1733260691542] 2024-12-03T21:18:56,078 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,45695,1733260691542 already deleted, retry=false 2024-12-03T21:18:56,079 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,45695,1733260691542 expired; onlineServers=0 2024-12-03T21:18:56,079 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,36475,1733260691366' ***** 2024-12-03T21:18:56,079 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:18:56,079 INFO [M:0;101545f66cbd:36475 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:18:56,079 INFO [M:0;101545f66cbd:36475 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:18:56,079 DEBUG [M:0;101545f66cbd:36475 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:18:56,079 DEBUG [M:0;101545f66cbd:36475 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:18:56,079 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T21:18:56,079 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260691945 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260691945,5,FailOnTimeoutGroup] 2024-12-03T21:18:56,079 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260691945 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260691945,5,FailOnTimeoutGroup] 2024-12-03T21:18:56,079 INFO [M:0;101545f66cbd:36475 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:18:56,079 INFO [M:0;101545f66cbd:36475 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:18:56,080 DEBUG [M:0;101545f66cbd:36475 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:18:56,080 INFO [M:0;101545f66cbd:36475 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:18:56,080 INFO [M:0;101545f66cbd:36475 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:18:56,080 INFO [M:0;101545f66cbd:36475 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:18:56,080 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:18:56,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:56,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:56,087 DEBUG [M:0;101545f66cbd:36475 {}] zookeeper.ZKUtil(347): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:18:56,087 WARN [M:0;101545f66cbd:36475 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:18:56,088 INFO [M:0;101545f66cbd:36475 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/.lastflushedseqids 2024-12-03T21:18:56,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741893_1079 (size=130) 2024-12-03T21:18:56,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741893_1079 (size=130) 2024-12-03T21:18:56,097 INFO [M:0;101545f66cbd:36475 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:18:56,097 INFO [M:0;101545f66cbd:36475 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:18:56,097 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:56,097 INFO [M:0;101545f66cbd:36475 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:56,098 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:56,098 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:56,098 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:56,098 INFO [M:0;101545f66cbd:36475 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-12-03T21:18:56,113 DEBUG [M:0;101545f66cbd:36475 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15af5a192ca54b3c98bb729c0a8e0988 is 82, key is hbase:meta,,1/info:regioninfo/1733260692623/Put/seqid=0 2024-12-03T21:18:56,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741894_1080 (size=5672) 2024-12-03T21:18:56,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741894_1080 (size=5672) 2024-12-03T21:18:56,118 INFO [M:0;101545f66cbd:36475 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15af5a192ca54b3c98bb729c0a8e0988 2024-12-03T21:18:56,137 DEBUG [M:0;101545f66cbd:36475 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/526a713612ca4e42b091b80bb59eeb9d is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733260693356/Put/seqid=0 2024-12-03T21:18:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741895_1081 (size=6256) 2024-12-03T21:18:56,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741895_1081 (size=6256) 2024-12-03T21:18:56,143 INFO [M:0;101545f66cbd:36475 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/526a713612ca4e42b091b80bb59eeb9d 2024-12-03T21:18:56,148 INFO [M:0;101545f66cbd:36475 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 526a713612ca4e42b091b80bb59eeb9d 2024-12-03T21:18:56,161 DEBUG [M:0;101545f66cbd:36475 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2702edb027a14cd78fbecba8b280c86a is 69, key is 101545f66cbd,39741,1733260692808/rs:state/1733260692885/Put/seqid=0 2024-12-03T21:18:56,167 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741896_1082 (size=5224) 2024-12-03T21:18:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741896_1082 (size=5224) 2024-12-03T21:18:56,167 INFO [M:0;101545f66cbd:36475 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2702edb027a14cd78fbecba8b280c86a 2024-12-03T21:18:56,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:56,170 INFO [RS:0;101545f66cbd:45695 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:56,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45695-0x1019e58f9790001, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:56,170 INFO [RS:0;101545f66cbd:45695 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,45695,1733260691542; zookeeper connection closed. 2024-12-03T21:18:56,171 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@605a6c6d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@605a6c6d 2024-12-03T21:18:56,171 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-03T21:18:56,185 DEBUG [M:0;101545f66cbd:36475 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7949640ac09e4d8f8aadc586bed5d053 is 52, key is load_balancer_on/state:d/1733260692793/Put/seqid=0 2024-12-03T21:18:56,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741897_1083 (size=5056) 2024-12-03T21:18:56,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741897_1083 (size=5056) 2024-12-03T21:18:56,190 INFO [M:0;101545f66cbd:36475 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7949640ac09e4d8f8aadc586bed5d053 2024-12-03T21:18:56,195 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15af5a192ca54b3c98bb729c0a8e0988 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/15af5a192ca54b3c98bb729c0a8e0988 2024-12-03T21:18:56,202 INFO [M:0;101545f66cbd:36475 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/15af5a192ca54b3c98bb729c0a8e0988, entries=8, sequenceid=60, filesize=5.5 K 2024-12-03T21:18:56,203 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/526a713612ca4e42b091b80bb59eeb9d as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/526a713612ca4e42b091b80bb59eeb9d 2024-12-03T21:18:56,209 INFO [M:0;101545f66cbd:36475 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 526a713612ca4e42b091b80bb59eeb9d 2024-12-03T21:18:56,209 INFO [M:0;101545f66cbd:36475 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/526a713612ca4e42b091b80bb59eeb9d, entries=6, sequenceid=60, filesize=6.1 K 2024-12-03T21:18:56,210 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2702edb027a14cd78fbecba8b280c86a as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2702edb027a14cd78fbecba8b280c86a 2024-12-03T21:18:56,216 INFO [M:0;101545f66cbd:36475 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2702edb027a14cd78fbecba8b280c86a, entries=2, sequenceid=60, filesize=5.1 K 2024-12-03T21:18:56,217 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7949640ac09e4d8f8aadc586bed5d053 as hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7949640ac09e4d8f8aadc586bed5d053 2024-12-03T21:18:56,222 INFO [M:0;101545f66cbd:36475 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7949640ac09e4d8f8aadc586bed5d053, entries=1, sequenceid=60, filesize=4.9 K 2024-12-03T21:18:56,223 INFO [M:0;101545f66cbd:36475 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false 2024-12-03T21:18:56,225 INFO [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:18:56,225 DEBUG [M:0;101545f66cbd:36475 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260736097Disabling compacts and flushes for region at 1733260736097Disabling writes for close at 1733260736098 (+1 ms)Obtaining lock to block concurrent updates at 1733260736098Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260736098Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1733260736098Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260736099 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260736099Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260736112 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260736112Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260736123 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260736137 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260736137Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260736148 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260736161 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260736161Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260736172 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260736185 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260736185Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4aaaefaa: reopening flushed file at 1733260736194 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@160632ca: reopening flushed file at 1733260736202 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dc165e3: reopening flushed file at 1733260736209 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13b71dc1: reopening flushed file at 1733260736216 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false at 1733260736223 (+7 ms)Writing region close event to WAL at 1733260736224 (+1 ms)Closed at 1733260736224 2024-12-03T21:18:56,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:18:56,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741880_1063 (size=1045) 2024-12-03T21:18:56,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741880_1063 (size=1045) 2024-12-03T21:18:56,229 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:18:56,229 INFO [M:0;101545f66cbd:36475 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T21:18:56,229 INFO [M:0;101545f66cbd:36475 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36475 2024-12-03T21:18:56,229 INFO [M:0;101545f66cbd:36475 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:18:56,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:56,337 INFO [M:0;101545f66cbd:36475 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:18:56,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36475-0x1019e58f9790000, quorum=127.0.0.1:59685, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:18:56,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36867 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:56,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f470d88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:56,342 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@261adbd7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:56,342 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:56,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d3b3ece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:56,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c598caf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:56,344 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:56,345 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:56,345 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:56,345 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid cbf1eb3c-64f4-4d13-a033-84daec941fc5) service to localhost/127.0.0.1:36575 2024-12-03T21:18:56,344 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39781,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33003 , LocalHost:localPort 101545f66cbd/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T21:18:56,345 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43547,null,null]) java.io.IOException: No block pool offer service for bpid=BP-698693125-172.17.0.2-1733260689316 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:56,346 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39781,null,null], DatanodeInfoWithStorage[127.0.0.1:43547,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-698693125-172.17.0.2-1733260689316:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:39781,null,null], DatanodeInfoWithStorage[127.0.0.1:43547,null,null]] 2024-12-03T21:18:56,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data3/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:56,346 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43547,null,null]) java.io.IOException: No block pool offer service for bpid=BP-698693125-172.17.0.2-1733260689316 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:18:56,346 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39781,null,null]) java.io.IOException: No block pool offer service for bpid=BP-698693125-172.17.0.2-1733260689316 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:18:56,346 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@732cc172 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43547,null,null], DatanodeInfoWithStorage[127.0.0.1:39781,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-698693125-172.17.0.2-1733260689316:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43547,null,null], DatanodeInfoWithStorage[127.0.0.1:39781,null,null]] 2024-12-03T21:18:56,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data4/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:56,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:56,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77d9f90{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:56,349 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24a7a164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:56,349 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:56,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bf0ac18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:56,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21f35bd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:56,351 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:18:56,351 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:18:56,351 WARN [BP-698693125-172.17.0.2-1733260689316 heartbeating to localhost/127.0.0.1:36575 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-698693125-172.17.0.2-1733260689316 (Datanode Uuid f23cfe64-c25c-42a3-9017-a7eff60f0915) service to localhost/127.0.0.1:36575 2024-12-03T21:18:56,351 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:18:56,352 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data5/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:56,352 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/cluster_facca1fe-3a27-599b-0225-951bd639884c/data/data6/current/BP-698693125-172.17.0.2-1733260689316 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:18:56,352 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:18:56,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@acc902f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:56,359 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71ccb409{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:18:56,359 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:18:56,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c3e366f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:18:56,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@100caf4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir/,STOPPED} 2024-12-03T21:18:56,369 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:18:56,383 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:18:56,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:18:56,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:18:56,422 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 79) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36575 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36575 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36575 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:36575 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:36575 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f3a90bf1e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f3a90bf1e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36575 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36575 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36575 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:36575 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:36575 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:36575 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 405) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=157 (was 172), ProcessCount=11 (was 11), AvailableMemoryMB=2488 (was 3047) 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=157, ProcessCount=11, AvailableMemoryMB=2489 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.log.dir so I do NOT create it in target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e9d42888-668d-aaec-8e33-515f6374998c/hadoop.tmp.dir so I do NOT create it in target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38, deleteOnExit=true 2024-12-03T21:18:56,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/test.cache.data in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:18:56,430 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:18:56,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:18:56,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:18:56,443 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:56,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:56,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:56,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:56,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:56,792 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:56,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:56,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ea9d584{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:56,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@101bbbad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:56,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:56,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:56,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d7adad9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-35781-hadoop-hdfs-3_4_1-tests_jar-_-any-6473087422729572364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:18:56,928 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4da18e2b{HTTP/1.1, (http/1.1)}{localhost:35781} 2024-12-03T21:18:56,928 INFO [Time-limited test {}] server.Server(415): Started @150026ms 2024-12-03T21:18:56,945 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:18:57,163 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:57,165 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:57,166 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:57,166 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:57,166 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:57,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a2c549c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:57,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@245efd98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:57,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@3a6fb45d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-46653-hadoop-hdfs-3_4_1-tests_jar-_-any-17103148393603728031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:57,256 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a03251a{HTTP/1.1, (http/1.1)}{localhost:46653} 2024-12-03T21:18:57,256 INFO [Time-limited test {}] server.Server(415): Started @150354ms 2024-12-03T21:18:57,257 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:18:57,280 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:18:57,283 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:18:57,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:18:57,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:18:57,284 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:18:57,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@107ec413{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:18:57,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43ae6a54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:18:57,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@229e7731{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-42755-hadoop-hdfs-3_4_1-tests_jar-_-any-8017928397193042009/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:18:57,383 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2eaba4db{HTTP/1.1, (http/1.1)}{localhost:42755} 2024-12-03T21:18:57,384 INFO [Time-limited test {}] server.Server(415): Started @150481ms 2024-12-03T21:18:57,385 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
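Note on the Close-WAL-Writer-0 warnings above (and repeated below): the stack frames GeneratedMethodAccessor117.invoke -> RecoverLeaseFSUtils.isFileClosed show that the WAL close path probes whether the old WAL file is already closed on HDFS by calling isFileClosed reflectively, and the InvocationTargetException caused by "java.io.IOException: Filesystem closed" simply means that probe ran after the test had already shut its DFSClient down, so the retry loop keeps logging and sleeping. The following is a minimal illustrative sketch of that kind of reflective probe, not the HBase implementation itself; apart from the Hadoop FileSystem/Path API and the isFileClosed method named in the trace, the class and argument names here are assumptions for illustration only.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch (hypothetical class, not HBase source): ask an HDFS
// FileSystem whether a file is already closed, using reflection so the code
// also works against FileSystem implementations that have no isFileClosed().
public final class LeaseRecoveryProbe {

  /** Returns true only if fs exposes isFileClosed(Path) and it reports the file closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // Filesystem has no isFileClosed(); caller falls back to plain lease recovery.
      return false;
    } catch (IllegalAccessException | InvocationTargetException e) {
      // This is the shape of the WARN entries in this log: invoke() wraps the
      // underlying "java.io.IOException: Filesystem closed" thrown by a
      // DFSClient that has already been shut down.
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical usage: pass an hdfs:// path such as a WAL file.
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]);
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      System.out.println("closed=" + isFileClosed(fs, wal));
    }
  }
}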
2024-12-03T21:18:57,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:57,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:58,222 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data1/current/BP-317319251-172.17.0.2-1733260736457/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:58,222 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data2/current/BP-317319251-172.17.0.2-1733260736457/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:58,238 WARN [Thread-1152 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40233ea23f94de46 with lease ID 0x4fa6c3842fe7fe66: Processing first storage report for DS-f8da228f-f7c1-402f-b426-5d71fccc0b67 from datanode DatanodeRegistration(127.0.0.1:42379, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35647, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457) 2024-12-03T21:18:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40233ea23f94de46 with lease ID 0x4fa6c3842fe7fe66: from storage DS-f8da228f-f7c1-402f-b426-5d71fccc0b67 node DatanodeRegistration(127.0.0.1:42379, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35647, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40233ea23f94de46 with lease ID 0x4fa6c3842fe7fe66: Processing first storage report for DS-a3977948-18c3-4227-bb48-ed9d474f55a4 from datanode DatanodeRegistration(127.0.0.1:42379, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35647, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457) 2024-12-03T21:18:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40233ea23f94de46 with lease ID 0x4fa6c3842fe7fe66: from storage DS-a3977948-18c3-4227-bb48-ed9d474f55a4 node DatanodeRegistration(127.0.0.1:42379, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35647, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:58,375 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data3/current/BP-317319251-172.17.0.2-1733260736457/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:58,375 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data4/current/BP-317319251-172.17.0.2-1733260736457/current, will proceed with Du for space computation calculation, 2024-12-03T21:18:58,395 WARN [Thread-1175 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:18:58,397 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27d2f55ce5be3d77 with lease ID 0x4fa6c3842fe7fe67: Processing first storage report for DS-bfaa2733-3765-41d0-9ed1-5fa513afe699 from datanode DatanodeRegistration(127.0.0.1:42839, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=45621, infoSecurePort=0, ipcPort=45447, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457) 2024-12-03T21:18:58,397 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27d2f55ce5be3d77 with lease ID 0x4fa6c3842fe7fe67: from storage DS-bfaa2733-3765-41d0-9ed1-5fa513afe699 node DatanodeRegistration(127.0.0.1:42839, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=45621, infoSecurePort=0, ipcPort=45447, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:58,397 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27d2f55ce5be3d77 with lease ID 0x4fa6c3842fe7fe67: Processing first storage report for DS-bb2c3638-4b19-4c84-9ccc-5d977c537c1c from datanode DatanodeRegistration(127.0.0.1:42839, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=45621, infoSecurePort=0, ipcPort=45447, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457) 2024-12-03T21:18:58,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27d2f55ce5be3d77 with lease ID 0x4fa6c3842fe7fe67: from storage DS-bb2c3638-4b19-4c84-9ccc-5d977c537c1c node DatanodeRegistration(127.0.0.1:42839, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=45621, infoSecurePort=0, ipcPort=45447, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:18:58,414 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46 2024-12-03T21:18:58,417 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/zookeeper_0, clientPort=49229, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:18:58,418 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49229 2024-12-03T21:18:58,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,421 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:58,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:18:58,432 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c with version=8 2024-12-03T21:18:58,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:18:58,434 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:18:58,434 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:58,435 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41581 2024-12-03T21:18:58,436 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41581 connecting to ZooKeeper ensemble=127.0.0.1:49229 2024-12-03T21:18:58,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415810x0, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:58,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41581-0x1019e59b1590000 connected 2024-12-03T21:18:58,570 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,572 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:58,575 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c, hbase.cluster.distributed=false 2024-12-03T21:18:58,577 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:58,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41581 2024-12-03T21:18:58,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41581 2024-12-03T21:18:58,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41581 2024-12-03T21:18:58,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41581 2024-12-03T21:18:58,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41581 2024-12-03T21:18:58,594 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:18:58,595 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:18:58,596 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33773 2024-12-03T21:18:58,597 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33773 connecting to ZooKeeper ensemble=127.0.0.1:49229 2024-12-03T21:18:58,597 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,599 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337730x0, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:18:58,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337730x0, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:18:58,610 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33773-0x1019e59b1590001 connected 2024-12-03T21:18:58,610 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:18:58,611 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:18:58,612 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:18:58,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:18:58,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33773 2024-12-03T21:18:58,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33773 2024-12-03T21:18:58,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33773 2024-12-03T21:18:58,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33773 2024-12-03T21:18:58,615 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33773 2024-12-03T21:18:58,629 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:41581 2024-12-03T21:18:58,630 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:58,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:58,637 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:18:58,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,645 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:18:58,646 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,41581,1733260738434 from backup master directory 2024-12-03T21:18:58,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:58,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:18:58,653 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
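Note on the RpcExecutor / RWQueueRpcExecutor entries above (handlerCount=3, maxQueueLength=30, and the split into read and write handlers): those numbers are normally driven by the stock hbase-site.xml properties shown in the minimal sketch below. The log does not show which keys this test harness actually sets, so the values here simply mirror what was logged and the keys are the standard HBase ones, used for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // RPC handler threads per server (handlerCount=3 in the log).
            conf.setInt("hbase.regionserver.handler.count", 3);
            // Upper bound on queued calls per call queue (maxQueueLength=30 in the log).
            conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
            // Controls how handlers are split between read and write call queues;
            // the log above shows a 1 write / 2 read split for priority.RWQ.Fifo.
            conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
            System.out.println("handler.count = "
                + conf.getInt("hbase.regionserver.handler.count", -1));
        }
    }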
2024-12-03T21:18:58,653 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,658 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/hbase.id] with ID: 837cd589-4947-4570-a2dc-60391fbaed19 2024-12-03T21:18:58,658 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/.tmp/hbase.id 2024-12-03T21:18:58,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:58,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:18:58,664 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/.tmp/hbase.id]:[hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/hbase.id] 2024-12-03T21:18:58,675 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:58,675 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:18:58,677 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
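The cluster ID sequence above (write hbase.id to a .tmp location, then move it to its target path) is the usual publish-by-rename pattern on HDFS: readers either see the complete file or no file at all. The sketch below reproduces that pattern with the plain Hadoop FileSystem API; the paths are hypothetical stand-ins, and this is not the FSUtils code itself.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PublishByRename {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path tmp = new Path("/user/jenkins/demo/.tmp/hbase.id"); // hypothetical path
            Path dst = new Path("/user/jenkins/demo/hbase.id");      // hypothetical path
            // Write the full content to the temporary file first.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("837cd589-4947-4570-a2dc-60391fbaed19"
                    .getBytes(StandardCharsets.UTF_8));
            }
            // Rename is atomic on HDFS, so the target appears fully written or not at all.
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }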
2024-12-03T21:18:58,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:58,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:18:58,694 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:18:58,695 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:18:58,695 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:58,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:58,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:18:58,703 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store 2024-12-03T21:18:58,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:58,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:18:58,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:58,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:18:58,709 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:58,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:58,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:18:58,709 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:18:58,710 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
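The 'master:store' descriptor printed above (families info, proc, rs and state, each with its own bloom filter, block encoding, in-memory flag and block size) is built internally by the master, but the per-family attributes map directly onto the public descriptor builder API. Below is a hedged sketch for the 'info' family only, assuming the HBase 2.x/3.x client classes; "demo:store" is a stand-in table name, since the real master:store region is not created through the client API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family attributes logged for master:store:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
            // IN_MEMORY=true, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
                .setColumnFamily(info)
                .build();
        }
    }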
2024-12-03T21:18:58,710 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260738709Disabling compacts and flushes for region at 1733260738709Disabling writes for close at 1733260738709Writing region close event to WAL at 1733260738710 (+1 ms)Closed at 1733260738710 2024-12-03T21:18:58,711 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/.initializing 2024-12-03T21:18:58,711 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,713 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C41581%2C1733260738434, suffix=, logDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434, archiveDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/oldWALs, maxLogs=10 2024-12-03T21:18:58,714 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C41581%2C1733260738434.1733260738713 2024-12-03T21:18:58,718 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 2024-12-03T21:18:58,719 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45621:45621),(127.0.0.1/127.0.0.1:35647:35647)] 2024-12-03T21:18:58,721 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:58,722 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:58,722 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,722 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:18:58,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:58,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:18:58,726 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:58,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:18:58,727 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:58,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:18:58,729 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:18:58,730 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,730 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,731 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,732 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,732 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,732 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:18:58,734 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:18:58,736 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:58,737 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872649, jitterRate=0.10963118076324463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:18:58,737 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260738722Initializing all the Stores at 1733260738723 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260738723Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260738723Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260738723Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260738723Cleaning up temporary data from old regions at 1733260738732 (+9 ms)Region opened successfully at 1733260738737 (+5 ms) 2024-12-03T21:18:58,737 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:18:58,741 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754417d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:58,742 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:18:58,742 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:18:58,742 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:18:58,742 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:18:58,743 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:18:58,743 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:18:58,743 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:18:58,745 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:18:58,746 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:18:58,753 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:18:58,754 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:18:58,754 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:18:58,761 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:18:58,762 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:18:58,763 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:18:58,770 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:18:58,771 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:18:58,778 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:18:58,781 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:18:58,787 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:18:58,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:58,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:18:58,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,796 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,41581,1733260738434, sessionid=0x1019e59b1590000, setting cluster-up flag (Was=false) 2024-12-03T21:18:58,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:58,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:18:58,837 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:18:58,838 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:58,878 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:18:58,880 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41581,1733260738434 2024-12-03T21:18:58,881 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:18:58,883 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:58,883 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:18:58,883 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
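The StochasticLoadBalancer entry above reports maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. As a sketch, the standard balancer configuration keys behind those numbers are shown below; the keys are the stock StochasticLoadBalancer properties, and the log does not confirm whether this test sets them explicitly or relies on defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hard cap on candidate moves evaluated per balancer run (maxSteps in the log).
            conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
            // Candidate moves evaluated per region (stepsPerRegion in the log).
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            // Wall-clock budget for one balancer run, in ms (maxRunningTime in the log).
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
        }
    }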
2024-12-03T21:18:58,883 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,41581,1733260738434 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:18:58,885 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:58,885 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:58,885 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:58,885 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:18:58,885 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:18:58,886 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,886 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:58,886 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260768887 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:18:58,887 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,887 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:58,888 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:18:58,888 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:18:58,888 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:18:58,888 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:18:58,888 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:18:58,888 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:18:58,888 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260738888,5,FailOnTimeoutGroup] 2024-12-03T21:18:58,889 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,889 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:18:58,890 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260738888,5,FailOnTimeoutGroup] 2024-12-03T21:18:58,890 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,890 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:18:58,890 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,890 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:58,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:18:58,898 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:18:58,899 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c 2024-12-03T21:18:58,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:18:58,909 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:18:58,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:58,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:58,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:58,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:58,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:58,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:58,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:58,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:58,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:58,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,919 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(746): ClusterId : 837cd589-4947-4570-a2dc-60391fbaed19 2024-12-03T21:18:58,919 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:18:58,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:58,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:58,920 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:58,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:58,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:58,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:58,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740 2024-12-03T21:18:58,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740 2024-12-03T21:18:58,924 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:58,924 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:18:58,924 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:18:58,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:58,928 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:18:58,928 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771003, jitterRate=-0.0196198970079422}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260738910Initializing all the Stores at 1733260738911 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260738911Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260738911Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260738911Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260738911Cleaning up temporary data from old regions at 1733260738924 (+13 ms)Region opened successfully at 1733260738929 (+5 ms) 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:18:58,929 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:18:58,929 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:18:58,929 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:18:58,929 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:18:58,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260738929Disabling compacts and flushes for region at 1733260738929Disabling writes for close at 1733260738929Writing region close event to WAL at 1733260738929Closed at 1733260738929 2024-12-03T21:18:58,931 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:58,931 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:18:58,931 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:18:58,933 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:58,934 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:18:58,937 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:18:58,938 DEBUG [RS:0;101545f66cbd:33773 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12555915, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:18:58,948 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:33773 2024-12-03T21:18:58,948 INFO [RS:0;101545f66cbd:33773 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:18:58,948 INFO [RS:0;101545f66cbd:33773 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:18:58,948 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T21:18:58,949 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,41581,1733260738434 with port=33773, startcode=1733260738594 2024-12-03T21:18:58,949 DEBUG [RS:0;101545f66cbd:33773 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:18:58,951 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42007, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:18:58,952 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41581 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,952 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41581 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,954 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c 2024-12-03T21:18:58,954 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42369 2024-12-03T21:18:58,954 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:18:58,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:18:58,962 DEBUG [RS:0;101545f66cbd:33773 {}] zookeeper.ZKUtil(111): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,962 WARN [RS:0;101545f66cbd:33773 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:18:58,962 INFO [RS:0;101545f66cbd:33773 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:58,963 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,963 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,33773,1733260738594] 2024-12-03T21:18:58,966 INFO [RS:0;101545f66cbd:33773 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:18:58,968 INFO [RS:0;101545f66cbd:33773 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:18:58,968 INFO [RS:0;101545f66cbd:33773 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:18:58,968 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
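The compaction throughput figures logged just above (higher bound 100 MB/s, lower bound 50 MB/s, tuning period 60000 ms) are the values consumed by PressureAwareCompactionThroughputController. A minimal sketch, assuming the stock configuration key names for that controller; the values below simply mirror what the log reports and are not taken from the test's own setup:

```java
// Illustrative only: setting the throughput bounds and tuning period that
// PressureAwareCompactionThroughputController logs at startup (key names assumed).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
    public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Upper bound: 100 MB/s ("higher bound: 100.00 MB/second" above)
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        // Lower bound: 50 MB/s ("lower bound 50.00 MB/second" above)
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Re-tune every 60 s ("tuning period: 60000 ms" above)
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        return conf;
    }
}
```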
2024-12-03T21:18:58,968 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:18:58,969 INFO [RS:0;101545f66cbd:33773 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:18:58,969 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,969 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:58,970 DEBUG [RS:0;101545f66cbd:33773 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,970 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,33773,1733260738594-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:58,982 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:18:58,983 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,33773,1733260738594-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,983 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,983 INFO [RS:0;101545f66cbd:33773 {}] regionserver.Replication(171): 101545f66cbd,33773,1733260738594 started 2024-12-03T21:18:58,994 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:58,994 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,33773,1733260738594, RpcServer on 101545f66cbd/172.17.0.2:33773, sessionid=0x1019e59b1590001 2024-12-03T21:18:58,995 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:18:58,995 DEBUG [RS:0;101545f66cbd:33773 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,995 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,33773,1733260738594' 2024-12-03T21:18:58,995 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:18:58,995 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,33773,1733260738594 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,33773,1733260738594' 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:18:58,996 DEBUG 
[RS:0;101545f66cbd:33773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:18:58,996 DEBUG [RS:0;101545f66cbd:33773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:18:58,996 INFO [RS:0;101545f66cbd:33773 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:18:58,996 INFO [RS:0;101545f66cbd:33773 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:18:59,084 WARN [101545f66cbd:41581 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T21:18:59,099 INFO [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C33773%2C1733260738594, suffix=, logDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594, archiveDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs, maxLogs=32 2024-12-03T21:18:59,101 INFO [RS:0;101545f66cbd:33773 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:18:59,109 INFO [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:18:59,109 DEBUG [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35647:35647),(127.0.0.1/127.0.0.1:45621:45621)] 2024-12-03T21:18:59,334 DEBUG [101545f66cbd:41581 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:18:59,335 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,33773,1733260738594 2024-12-03T21:18:59,337 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,33773,1733260738594, state=OPENING 2024-12-03T21:18:59,362 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:18:59,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:59,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:18:59,371 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:18:59,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:59,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:59,371 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,33773,1733260738594}] 2024-12-03T21:18:59,526 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:18:59,531 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53091, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:18:59,536 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:18:59,536 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:18:59,539 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C33773%2C1733260738594.meta, suffix=.meta, logDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594, archiveDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs, maxLogs=32 2024-12-03T21:18:59,540 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta 2024-12-03T21:18:59,546 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta 2024-12-03T21:18:59,548 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45621:45621),(127.0.0.1/127.0.0.1:35647:35647)] 2024-12-03T21:18:59,549 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:18:59,549 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:18:59,549 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:18:59,549 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
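The coprocessor spec `|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|` recorded in the hbase:meta descriptor, and the "Loaded coprocessor ... from HTD" line above, are the kind of attribute produced by declaring a coprocessor on a table descriptor. A minimal sketch, assuming the standard TableDescriptorBuilder/CoprocessorDescriptorBuilder client API (this is an illustration, not how meta itself is bootstrapped):

```java
// Illustrative only: attaching a coprocessor endpoint to an existing table descriptor.
import java.io.IOException;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
    static TableDescriptor withEndpoint(TableDescriptor base) throws IOException {
        return TableDescriptorBuilder.newBuilder(base)
            .setCoprocessor(CoprocessorDescriptorBuilder
                .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setPriority(536870911)   // matches the priority recorded in the log
                .build())
            .build();
    }
}
```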
2024-12-03T21:18:59,550 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:18:59,550 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:59,550 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:18:59,550 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:18:59,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:18:59,553 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:18:59,553 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:59,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:59,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:18:59,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:18:59,555 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:59,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:59,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:18:59,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:18:59,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:59,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:18:59,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:18:59,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:18:59,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:59,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
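The per-family attributes repeated in the store-open entries above (for 'info': VERSIONS 3, IN_MEMORY true, BLOCKSIZE 8 KB, BLOOMFILTER ROWCOL, DATA_BLOCK_ENCODING ROW_INDEX_V1) correspond to standard column-family descriptor settings. A minimal sketch of building such a family with the public ColumnFamilyDescriptorBuilder API, mirroring the values shown in the log:

```java
// Illustrative only: a column family configured like the 'info' family of hbase:meta above.
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
    static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                               // VERSIONS => '3'
            .setInMemory(true)                               // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                          // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)            // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
    }
}
```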
2024-12-03T21:18:59,560 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:18:59,561 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740 2024-12-03T21:18:59,562 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740 2024-12-03T21:18:59,563 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:18:59,564 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:18:59,564 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:18:59,565 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:18:59,566 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852928, jitterRate=0.08455419540405273}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:18:59,566 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:18:59,567 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260739550Writing region info on filesystem at 1733260739550Initializing all the Stores at 1733260739551 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260739551Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260739552 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260739552Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260739552Cleaning up temporary data from old regions at 1733260739564 (+12 ms)Running coprocessor post-open hooks at 1733260739567 (+3 ms)Region opened successfully at 1733260739567 2024-12-03T21:18:59,568 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260739525 2024-12-03T21:18:59,572 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:18:59,572 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:18:59,573 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,33773,1733260738594 2024-12-03T21:18:59,574 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,33773,1733260738594, state=OPEN 2024-12-03T21:18:59,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:59,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:18:59,663 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,33773,1733260738594 2024-12-03T21:18:59,663 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:59,663 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:18:59,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:18:59,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,33773,1733260738594 in 292 msec 2024-12-03T21:18:59,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:18:59,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-12-03T21:18:59,676 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:18:59,676 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:18:59,678 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:59,678 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,33773,1733260738594, seqNum=-1] 2024-12-03T21:18:59,678 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:59,680 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47513, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:59,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 804 msec 2024-12-03T21:18:59,688 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260739688, completionTime=-1 2024-12-03T21:18:59,688 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:18:59,688 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:18:59,691 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:18:59,691 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260799691 2024-12-03T21:18:59,691 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260859691 2024-12-03T21:18:59,691 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-03T21:18:59,692 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:59,692 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:59,692 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:59,692 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:41581, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:18:59,692 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:59,693 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:18:59,695 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:18:59,697 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.044sec 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:18:59,698 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:18:59,701 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:18:59,701 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:18:59,701 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41581,1733260738434-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
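The many "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above are emitted when chores are registered with a ChoreService. A minimal sketch of how such a chore is defined and scheduled, assuming the public ScheduledChore/ChoreService API; the chore name and period here are hypothetical, not taken from this test:

```java
// Illustrative only: defining and scheduling a periodic chore.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    static ScheduledChore exampleChore(Stoppable stopper) {
        // name, stopper, period in milliseconds; chore() runs on each tick
        return new ScheduledChore("ExampleChore", stopper, 60_000) {
            @Override
            protected void chore() {
                // periodic work would go here
            }
        };
    }

    static void schedule(ChoreService service, ScheduledChore chore) {
        service.scheduleChore(chore);  // produces an "... is enabled." log line when scheduled
    }
}
```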
2024-12-03T21:18:59,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5f8483, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:59,720 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,41581,-1 for getting cluster id 2024-12-03T21:18:59,720 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:18:59,721 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '837cd589-4947-4570-a2dc-60391fbaed19' 2024-12-03T21:18:59,721 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:18:59,722 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "837cd589-4947-4570-a2dc-60391fbaed19" 2024-12-03T21:18:59,722 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f38af07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:59,722 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,41581,-1] 2024-12-03T21:18:59,722 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:18:59,722 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:18:59,724 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:18:59,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4feb9e1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:18:59,725 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:18:59,726 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,33773,1733260738594, seqNum=-1] 2024-12-03T21:18:59,726 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:18:59,727 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:18:59,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,41581,1733260738434 2024-12-03T21:18:59,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:18:59,731 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:18:59,731 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-03T21:18:59,731 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-03T21:18:59,731 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T21:18:59,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 101545f66cbd,41581,1733260738434 2024-12-03T21:18:59,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@29e59a49 2024-12-03T21:18:59,732 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T21:18:59,734 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40914, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T21:18:59,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T21:18:59,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
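The two TableDescriptorChecker warnings above fire because the test creates its table with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes, splits, and log rolls happen quickly. A minimal sketch of building such a descriptor with the standard client API, assuming this is roughly what the test harness does (the actual helper used by TestLogRolling is not shown in this log); table and family names follow the create request logged below:

```java
// Illustrative only: creating a table with the small sizes that trigger the warnings above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallRegionTableSketch {
    public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setMaxFileSize(786432)       // 768 KB: far below the sanity threshold, hence the warning
            .setMemStoreFlushSize(8192)   // 8 KB: forces very frequent flushing
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(td);        // issues the same create request seen in the log below
        }
    }
}
```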
2024-12-03T21:18:59,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:18:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T21:18:59,738 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T21:18:59,738 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:18:59,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-03T21:18:59,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:18:59,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T21:18:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741835_1011 (size=395) 2024-12-03T21:18:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741835_1011 (size=395) 2024-12-03T21:18:59,749 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4fc8cb446cbadc93395caddbaf2dcc6b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c 2024-12-03T21:18:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42379 is added to blk_1073741836_1012 (size=78) 2024-12-03T21:18:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741836_1012 (size=78) 2024-12-03T21:18:59,757 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:18:59,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 4fc8cb446cbadc93395caddbaf2dcc6b, disabling compactions & flushes 2024-12-03T21:18:59,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:18:59,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:18:59,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. after waiting 0 ms 2024-12-03T21:18:59,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:18:59,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:18:59,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4fc8cb446cbadc93395caddbaf2dcc6b: Waiting for close lock at 1733260739757Disabling compacts and flushes for region at 1733260739757Disabling writes for close at 1733260739757Writing region close event to WAL at 1733260739757Closed at 1733260739757 2024-12-03T21:18:59,759 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T21:18:59,759 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733260739759"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260739759"}]},"ts":"1733260739759"} 2024-12-03T21:18:59,763 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T21:18:59,764 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T21:18:59,764 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260739764"}]},"ts":"1733260739764"} 2024-12-03T21:18:59,767 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-03T21:18:59,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4fc8cb446cbadc93395caddbaf2dcc6b, ASSIGN}] 2024-12-03T21:18:59,769 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4fc8cb446cbadc93395caddbaf2dcc6b, ASSIGN 2024-12-03T21:18:59,770 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4fc8cb446cbadc93395caddbaf2dcc6b, ASSIGN; state=OFFLINE, location=101545f66cbd,33773,1733260738594; forceNewPlan=false, retain=false 2024-12-03T21:18:59,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:18:59,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:18:59,921 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4fc8cb446cbadc93395caddbaf2dcc6b, regionState=OPENING, regionLocation=101545f66cbd,33773,1733260738594 2024-12-03T21:18:59,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4fc8cb446cbadc93395caddbaf2dcc6b, ASSIGN because future has completed 2024-12-03T21:18:59,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4fc8cb446cbadc93395caddbaf2dcc6b, server=101545f66cbd,33773,1733260738594}] 2024-12-03T21:19:00,081 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:00,081 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4fc8cb446cbadc93395caddbaf2dcc6b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:19:00,081 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,081 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:00,081 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,081 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,083 INFO [StoreOpener-4fc8cb446cbadc93395caddbaf2dcc6b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,085 INFO [StoreOpener-4fc8cb446cbadc93395caddbaf2dcc6b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4fc8cb446cbadc93395caddbaf2dcc6b columnFamilyName info 2024-12-03T21:19:00,085 DEBUG [StoreOpener-4fc8cb446cbadc93395caddbaf2dcc6b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:00,086 INFO [StoreOpener-4fc8cb446cbadc93395caddbaf2dcc6b-1 {}] regionserver.HStore(327): Store=4fc8cb446cbadc93395caddbaf2dcc6b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:19:00,086 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,088 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,088 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,089 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,089 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,092 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,094 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:19:00,094 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4fc8cb446cbadc93395caddbaf2dcc6b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707127, jitterRate=-0.10084261000156403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:19:00,094 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:00,094 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4fc8cb446cbadc93395caddbaf2dcc6b: Running coprocessor pre-open hook at 1733260740082Writing region info on filesystem at 1733260740082Initializing all the Stores at 1733260740083 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260740083Cleaning up temporary data from old regions at 1733260740089 (+6 ms)Running coprocessor post-open hooks at 1733260740094 (+5 ms)Region opened successfully at 1733260740094 2024-12-03T21:19:00,095 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b., pid=6, masterSystemTime=1733260740077 2024-12-03T21:19:00,097 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:00,098 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:00,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4fc8cb446cbadc93395caddbaf2dcc6b, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,33773,1733260738594 2024-12-03T21:19:00,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4fc8cb446cbadc93395caddbaf2dcc6b, server=101545f66cbd,33773,1733260738594 because future has completed 2024-12-03T21:19:00,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T21:19:00,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4fc8cb446cbadc93395caddbaf2dcc6b, server=101545f66cbd,33773,1733260738594 in 177 msec 2024-12-03T21:19:00,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T21:19:00,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4fc8cb446cbadc93395caddbaf2dcc6b, ASSIGN in 337 msec 2024-12-03T21:19:00,108 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T21:19:00,108 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260740108"}]},"ts":"1733260740108"} 2024-12-03T21:19:00,111 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-03T21:19:00,112 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T21:19:00,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 377 msec 2024-12-03T21:19:00,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:00,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:01,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:01,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:02,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:02,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:03,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T21:19:03,707 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T21:19:03,708 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T21:19:03,708 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-03T21:19:03,709 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:19:03,709 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T21:19:03,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:03,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:04,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:04,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:19:05,054 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T21:19:05,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T21:19:05,087 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-03T21:19:05,087 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-12-03T21:19:05,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:05,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:06,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:06,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:07,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:07,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:08,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:08,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:09,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41581 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:19:09,796 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-03T21:19:09,796 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-03T21:19:09,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T21:19:09,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:09,809 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b., hostname=101545f66cbd,33773,1733260738594, seqNum=2] 2024-12-03T21:19:09,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:09,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:10,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:10,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:11,812 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:11,813 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:11,813 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:11,813 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK], DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]) is bad. 2024-12-03T21:19:11,813 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:11,813 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK], DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]) is bad. 2024-12-03T21:19:11,813 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK], DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42839,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]) is bad. 2024-12-03T21:19:11,813 WARN [PacketResponder: BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42839] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:19:11,813 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764377988_22 at /127.0.0.1:34956 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34956 dst: /127.0.0.1:42839 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,813 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764377988_22 at /127.0.0.1:35202 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35202 dst: /127.0.0.1:42379 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:35232 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35232 dst: /127.0.0.1:42379 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:34998 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34998 dst: /127.0.0.1:42839 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:35236 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35236 dst: /127.0.0.1:42379 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:34984 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34984 dst: /127.0.0.1:42839 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:11,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@229e7731{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:11,848 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2eaba4db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:11,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:11,848 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:11,848 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43ae6a54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:11,848 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@107ec413{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:11,850 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T21:19:11,850 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:11,850 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:11,850 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4) service to localhost/127.0.0.1:42369 2024-12-03T21:19:11,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data3/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:11,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data4/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:11,852 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:11,862 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:11,866 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:11,867 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:11,867 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:11,867 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:11,867 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51ffb6e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:11,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@552dccee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:11,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dd4b4a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-41623-hadoop-hdfs-3_4_1-tests_jar-_-any-4863387848307221336/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:11,958 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75137f15{HTTP/1.1, (http/1.1)}{localhost:41623} 2024-12-03T21:19:11,958 INFO [Time-limited test {}] server.Server(415): Started @165056ms 2024-12-03T21:19:11,960 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:19:11,984 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:11,984 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:11,984 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:11,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:53550 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53550 dst: /127.0.0.1:42379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:19:11,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764377988_22 at /127.0.0.1:53562 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53562 dst: /127.0.0.1:42379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:53578 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42379:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53578 dst: /127.0.0.1:42379 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:11,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a6fb45d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:11,986 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a03251a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:11,986 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:11,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@245efd98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:11,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a2c549c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:11,987 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:11,987 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 438aa9b8-2ca8-41c5-b946-3172aa9e8abc) service to localhost/127.0.0.1:42369 2024-12-03T21:19:11,987 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:19:11,987 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:11,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data1/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:11,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data2/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:11,988 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:11,997 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:12,000 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:12,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:12,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:12,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:12,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70edb9bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:12,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@223db456{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:12,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60dfd9ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-33153-hadoop-hdfs-3_4_1-tests_jar-_-any-17084473529836493118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:12,090 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2482c0d4{HTTP/1.1, (http/1.1)}{localhost:33153} 2024-12-03T21:19:12,090 INFO [Time-limited test {}] server.Server(415): Started @165188ms 2024-12-03T21:19:12,092 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T21:19:12,433 WARN [Thread-1323 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:19:12,435 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec7711d7136fca93 with lease ID 0x4fa6c3842fe7fe68: from storage DS-bfaa2733-3765-41d0-9ed1-5fa513afe699 node DatanodeRegistration(127.0.0.1:37817, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=39009, infoSecurePort=0, ipcPort=41093, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:12,435 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec7711d7136fca93 with lease ID 0x4fa6c3842fe7fe68: from storage DS-bb2c3638-4b19-4c84-9ccc-5d977c537c1c node DatanodeRegistration(127.0.0.1:37817, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=39009, infoSecurePort=0, ipcPort=41093, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:12,542 WARN [Thread-1343 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:19:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8160d239824955af with lease ID 0x4fa6c3842fe7fe69: from storage DS-f8da228f-f7c1-402f-b426-5d71fccc0b67 node DatanodeRegistration(127.0.0.1:46805, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35829, infoSecurePort=0, ipcPort=35605, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8160d239824955af with lease ID 0x4fa6c3842fe7fe69: from storage DS-a3977948-18c3-4227-bb48-ed9d474f55a4 node DatanodeRegistration(127.0.0.1:46805, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=35829, infoSecurePort=0, ipcPort=35605, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:12,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:12,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:13,115 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-03T21:19:13,118 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-03T21:19:13,121 ERROR [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:13,121 WARN [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:13,121 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C33773%2C1733260738594:(num 1733260739100) roll requested 2024-12-03T21:19:13,121 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:13,130 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 newFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:13,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:13,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:13,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:13,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:13,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:13,131 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:13,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:13,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:13,132 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:13,132 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35829:35829),(127.0.0.1/127.0.0.1:39009:39009)] 2024-12-03T21:19:13,132 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 is not closed yet, will try archiving it next time 2024-12-03T21:19:13,132 WARN [IPC Server handler 0 on default port 42369 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-03T21:19:13,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 after 1ms 2024-12-03T21:19:13,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:13,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:14,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:14,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:15,138 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-03T21:19:15,436 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T21:19:15,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:15,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:16,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:16,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:17,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 after 4001ms 2024-12-03T21:19:17,141 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:17,141 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46805,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK], DatanodeInfoWithStorage[127.0.0.1:37817,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46805,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]) is bad. 2024-12-03T21:19:17,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:38472 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38472 dst: /127.0.0.1:46805 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:17,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:49854 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49854 dst: /127.0.0.1:37817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:19:17,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60dfd9ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:17,143 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2482c0d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:17,143 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:17,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@223db456{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:17,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70edb9bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:17,144 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:17,144 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T21:19:17,145 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 438aa9b8-2ca8-41c5-b946-3172aa9e8abc) service to localhost/127.0.0.1:42369 2024-12-03T21:19:17,145 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:17,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data1/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:17,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data2/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:17,145 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:17,158 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:17,161 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:17,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:17,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:17,163 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:19:17,163 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@703ef203{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:17,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7689d41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:17,258 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e276c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-46603-hadoop-hdfs-3_4_1-tests_jar-_-any-3625762218488091143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:17,258 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1386a0c3{HTTP/1.1, (http/1.1)}{localhost:46603} 2024-12-03T21:19:17,258 INFO [Time-limited test {}] server.Server(415): Started @170356ms 2024-12-03T21:19:17,260 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:19:17,276 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:17,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1843759525_22 at /127.0.0.1:54880 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54880 dst: /127.0.0.1:37817 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:19:17,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dd4b4a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:17,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75137f15{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:17,282 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:17,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@552dccee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:17,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51ffb6e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:17,283 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:17,283 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4) service to localhost/127.0.0.1:42369 2024-12-03T21:19:17,283 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T21:19:17,283 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:17,283 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data3/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:17,284 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data4/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:17,284 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:17,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:17,297 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:17,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:17,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:17,299 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:17,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e047afe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:17,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b3ddecc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:17,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27147a8d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/java.io.tmpdir/jetty-localhost-45259-hadoop-hdfs-3_4_1-tests_jar-_-any-16215353605562535637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:17,392 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2bfa0122{HTTP/1.1, (http/1.1)}{localhost:45259} 2024-12-03T21:19:17,392 INFO [Time-limited test {}] server.Server(415): Started @170490ms 2024-12-03T21:19:17,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:19:17,776 WARN [Thread-1397 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:19:17,778 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9da90aea749c683 with lease ID 0x4fa6c3842fe7fe6a: from storage DS-f8da228f-f7c1-402f-b426-5d71fccc0b67 node DatanodeRegistration(127.0.0.1:41453, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=37659, infoSecurePort=0, ipcPort=39639, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:17,778 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9da90aea749c683 with lease ID 0x4fa6c3842fe7fe6a: from storage DS-a3977948-18c3-4227-bb48-ed9d474f55a4 node DatanodeRegistration(127.0.0.1:41453, datanodeUuid=438aa9b8-2ca8-41c5-b946-3172aa9e8abc, infoPort=37659, infoSecurePort=0, ipcPort=39639, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:17,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:17,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:17,893 WARN [Thread-1417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T21:19:17,895 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0a9b21e5a521ee5 with lease ID 0x4fa6c3842fe7fe6b: from storage DS-bfaa2733-3765-41d0-9ed1-5fa513afe699 node DatanodeRegistration(127.0.0.1:42025, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=37477, infoSecurePort=0, ipcPort=38993, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0a9b21e5a521ee5 with lease ID 0x4fa6c3842fe7fe6b: from storage DS-bb2c3638-4b19-4c84-9ccc-5d977c537c1c node DatanodeRegistration(127.0.0.1:42025, datanodeUuid=5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4, infoPort=37477, infoSecurePort=0, ipcPort=38993, storageInfo=lv=-57;cid=testClusterID;nsid=1463046525;c=1733260736457), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:18,419 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-03T21:19:18,424 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-03T21:19:18,426 ERROR [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37817,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:18,426 WARN [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37817,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:18,426 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C33773%2C1733260738594:(num 1733260753121) roll requested 2024-12-03T21:19:18,426 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:18,432 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 newFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:18,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:18,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:18,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:18,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:18,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:18,433 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:18,433 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37817,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:18,433 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37817,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:18,433 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:18,434 WARN [IPC Server handler 3 on default port 42369 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-03T21:19:18,434 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:37477:37477)] 2024-12-03T21:19:18,434 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 after 1ms 2024-12-03T21:19:18,434 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 is not closed yet, will try archiving it next time 2024-12-03T21:19:18,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:18,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:19,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:19,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:20,436 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:20,447 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 newFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:20,447 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:20,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:20,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:20,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:20,448 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:20,448 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:20,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741838_1019 (size=1264) 2024-12-03T21:19:20,450 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:37477:37477)] 2024-12-03T21:19:20,450 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 is not closed yet, will try archiving it next time 2024-12-03T21:19:20,450 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 is not closed yet, will try archiving it next time 2024-12-03T21:19:20,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741838_1019 (size=1264) 2024-12-03T21:19:20,450 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:20,450 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:20,451 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 after 1ms 2024-12-03T21:19:20,451 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:20,452 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 is not closed yet, will try archiving it next time 2024-12-03T21:19:20,461 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733260740095/Put/vlen=218/seqid=0] 2024-12-03T21:19:20,461 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733260749811/Put/vlen=1045/seqid=0] 2024-12-03T21:19:20,462 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260739100 2024-12-03T21:19:20,462 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:20,462 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:20,462 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 after 0ms 2024-12-03T21:19:20,462 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:20,466 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733260753120/Put/vlen=1045/seqid=0] 2024-12-03T21:19:20,467 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733260755139/Put/vlen=1045/seqid=0] 2024-12-03T21:19:20,467 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 2024-12-03T21:19:20,467 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:20,467 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:20,468 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 after 1ms 2024-12-03T21:19:20,468 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260758426 2024-12-03T21:19:20,473 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733260758425/Put/vlen=1045/seqid=0] 2024-12-03T21:19:20,473 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:20,473 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:20,474 WARN [IPC Server handler 2 on default port 42369 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-03T21:19:20,474 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 after 1ms 2024-12-03T21:19:20,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:20,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:20,898 WARN [ResponseProcessor for block BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:20,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764377988_22 at /127.0.0.1:39618 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39618 dst: /127.0.0.1:42025 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:20,899 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 block BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41453,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK], DatanodeInfoWithStorage[127.0.0.1:42025,DS-bfaa2733-3765-41d0-9ed1-5fa513afe699,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41453,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]) is bad. 2024-12-03T21:19:20,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764377988_22 at /127.0.0.1:57934 [Receiving block BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41453:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57934 dst: /127.0.0.1:41453 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41453 remote=/127.0.0.1:57934]. Total timeout mills is 60000, 59548 millis timeout left. 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T21:19:20,902 WARN [DataStreamer for file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 block BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:20,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741839_1022 (size=85) 2024-12-03T21:19:21,778 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T21:19:21,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:21,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:22,435 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260753121 after 4002ms 2024-12-03T21:19:22,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:22,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:23,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:23,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:24,476 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 after 4003ms 2024-12-03T21:19:24,476 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:24,484 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:24,485 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-03T21:19:24,486 ERROR [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,486 WARN [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,486 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C33773%2C1733260738594.meta:.meta(num 1733260739539) roll requested 2024-12-03T21:19:24,486 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.meta.1733260764486.meta 2024-12-03T21:19:24,493 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,493 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,493 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,493 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,493 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,494 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260764486.meta 2024-12-03T21:19:24,494 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,494 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta 2024-12-03T21:19:24,494 WARN [IPC Server handler 3 on default port 42369 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-12-03T21:19:24,495 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta after 1ms 2024-12-03T21:19:24,495 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37477:37477),(127.0.0.1/127.0.0.1:37659:37659)] 2024-12-03T21:19:24,495 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta is not closed yet, will try archiving it next time 2024-12-03T21:19:24,512 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/info/3b7d4a2ea53744bebafd4587d3c6ca69 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b./info:regioninfo/1733260740098/Put/seqid=0 2024-12-03T21:19:24,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741841_1025 (size=7125) 2024-12-03T21:19:24,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741841_1025 (size=7125) 2024-12-03T21:19:24,518 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/info/3b7d4a2ea53744bebafd4587d3c6ca69 2024-12-03T21:19:24,536 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/ns/c913b08ab894424aa595ecd2de5ca647 is 43, key is default/ns:d/1733260739680/Put/seqid=0 2024-12-03T21:19:24,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41453 is added to blk_1073741842_1026 (size=5153) 2024-12-03T21:19:24,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741842_1026 (size=5153) 2024-12-03T21:19:24,542 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/ns/c913b08ab894424aa595ecd2de5ca647 2024-12-03T21:19:24,559 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/table/f7b122fd0615424a9dd2fdc9e3d2cbef is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733260740108/Put/seqid=0 2024-12-03T21:19:24,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741843_1027 (size=5438) 2024-12-03T21:19:24,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741843_1027 (size=5438) 2024-12-03T21:19:24,565 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/table/f7b122fd0615424a9dd2fdc9e3d2cbef 2024-12-03T21:19:24,573 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/info/3b7d4a2ea53744bebafd4587d3c6ca69 as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/info/3b7d4a2ea53744bebafd4587d3c6ca69 2024-12-03T21:19:24,580 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/info/3b7d4a2ea53744bebafd4587d3c6ca69, entries=10, sequenceid=11, filesize=7.0 K 2024-12-03T21:19:24,581 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/ns/c913b08ab894424aa595ecd2de5ca647 as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/ns/c913b08ab894424aa595ecd2de5ca647 2024-12-03T21:19:24,587 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/ns/c913b08ab894424aa595ecd2de5ca647, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T21:19:24,588 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/.tmp/table/f7b122fd0615424a9dd2fdc9e3d2cbef as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/table/f7b122fd0615424a9dd2fdc9e3d2cbef 2024-12-03T21:19:24,595 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/table/f7b122fd0615424a9dd2fdc9e3d2cbef, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T21:19:24,596 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 111ms, sequenceid=11, compaction requested=false 2024-12-03T21:19:24,596 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T21:19:24,597 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4fc8cb446cbadc93395caddbaf2dcc6b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-03T21:19:24,597 ERROR [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,597 WARN [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c-prefix:101545f66cbd,33773,1733260738594 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,598 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C33773%2C1733260738594:(num 1733260760435) roll requested 2024-12-03T21:19:24,598 INFO [regionserver/101545f66cbd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C33773%2C1733260738594.1733260764598 2024-12-03T21:19:24,616 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 newFile=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260764598 2024-12-03T21:19:24,617 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,617 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,617 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,617 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,617 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,617 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260764598 2024-12-03T21:19:24,617 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:24,618 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-317319251-172.17.0.2-1733260736457:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:24,618 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:24,619 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 after 1ms 2024-12-03T21:19:24,619 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.1733260760435 to hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs/101545f66cbd%2C33773%2C1733260738594.1733260760435 2024-12-03T21:19:24,623 DEBUG [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:37477:37477)] 2024-12-03T21:19:24,636 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/.tmp/info/6476cdf248aa4261a822b936f17bdb7b is 1080, key is row1002/info:/1733260749811/Put/seqid=0 2024-12-03T21:19:24,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741845_1029 (size=9270) 2024-12-03T21:19:24,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741845_1029 (size=9270) 2024-12-03T21:19:24,641 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/.tmp/info/6476cdf248aa4261a822b936f17bdb7b 2024-12-03T21:19:24,647 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/.tmp/info/6476cdf248aa4261a822b936f17bdb7b as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/info/6476cdf248aa4261a822b936f17bdb7b 2024-12-03T21:19:24,653 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/info/6476cdf248aa4261a822b936f17bdb7b, entries=4, sequenceid=8, filesize=9.1 K 2024-12-03T21:19:24,654 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4fc8cb446cbadc93395caddbaf2dcc6b in 58ms, sequenceid=8, compaction requested=false 2024-12-03T21:19:24,654 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
4fc8cb446cbadc93395caddbaf2dcc6b: 2024-12-03T21:19:24,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:19:24,660 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:19:24,660 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:19:24,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:19:24,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-03T21:19:24,660 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T21:19:24,661 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:19:24,661 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=522364121, stopped=false 2024-12-03T21:19:24,661 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,41581,1733260738434 2024-12-03T21:19:24,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:19:24,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:19:24,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:24,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:24,668 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:19:24,669 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T21:19:24,669 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:19:24,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:19:24,669 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:19:24,669 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:19:24,669 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,33773,1733260738594' ***** 2024-12-03T21:19:24,669 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:19:24,669 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:19:24,669 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:19:24,669 INFO [RS:0;101545f66cbd:33773 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:19:24,669 INFO [RS:0;101545f66cbd:33773 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:19:24,669 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(3091): Received CLOSE for 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,33773,1733260738594 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:33773. 2024-12-03T21:19:24,670 DEBUG [RS:0;101545f66cbd:33773 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4fc8cb446cbadc93395caddbaf2dcc6b, disabling compactions & flushes 2024-12-03T21:19:24,670 DEBUG [RS:0;101545f66cbd:33773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:19:24,670 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. after waiting 0 ms 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:24,670 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T21:19:24,670 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4fc8cb446cbadc93395caddbaf2dcc6b=TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b.} 2024-12-03T21:19:24,670 DEBUG [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4fc8cb446cbadc93395caddbaf2dcc6b 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:19:24,670 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:19:24,670 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:19:24,674 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T21:19:24,674 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/data/default/TestLogRolling-testLogRollOnPipelineRestart/4fc8cb446cbadc93395caddbaf2dcc6b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-03T21:19:24,674 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:19:24,674 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:19:24,674 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:24,674 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260764670Running coprocessor pre-close hooks at 1733260764670Disabling compacts and flushes for region at 1733260764670Disabling writes for close at 1733260764670Writing region close event to WAL at 1733260764671 (+1 ms)Running coprocessor post-close hooks at 1733260764674 (+3 ms)Closed at 1733260764674 2024-12-03T21:19:24,674 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4fc8cb446cbadc93395caddbaf2dcc6b: Waiting for close lock at 1733260764670Running coprocessor pre-close hooks at 1733260764670Disabling compacts and flushes for region at 1733260764670Disabling writes for close at 1733260764670Writing region close event to WAL at 1733260764670Running coprocessor post-close hooks at 1733260764674 (+4 ms)Closed at 1733260764674 2024-12-03T21:19:24,675 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:19:24,675 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733260739735.4fc8cb446cbadc93395caddbaf2dcc6b. 2024-12-03T21:19:24,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:24,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:24,870 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,33773,1733260738594; all regions closed. 2024-12-03T21:19:24,871 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,872 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,872 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,872 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741840_1023 (size=825) 2024-12-03T21:19:24,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741840_1023 (size=825) 2024-12-03T21:19:24,972 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:19:25,035 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T21:19:25,035 INFO [regionserver/101545f66cbd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T21:19:25,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:25,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:26,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:26,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:27,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:27,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:27,895 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T21:19:28,414 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T21:19:28,496 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta after 4001ms 2024-12-03T21:19:28,496 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/WALs/101545f66cbd,33773,1733260738594/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta to hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs/101545f66cbd%2C33773%2C1733260738594.meta.1733260739539.meta 2024-12-03T21:19:28,498 DEBUG [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs 2024-12-03T21:19:28,498 INFO [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C33773%2C1733260738594.meta:.meta(num 1733260764486) 2024-12-03T21:19:28,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741844_1028 (size=1162) 2024-12-03T21:19:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741844_1028 (size=1162) 2024-12-03T21:19:28,504 DEBUG [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs 2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C33773%2C1733260738594:(num 1733260764598) 2024-12-03T21:19:28,505 DEBUG [RS:0;101545f66cbd:33773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:19:28,505 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:19:28,505 INFO [RS:0;101545f66cbd:33773 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33773 2024-12-03T21:19:28,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,33773,1733260738594 2024-12-03T21:19:28,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:19:28,525 INFO [RS:0;101545f66cbd:33773 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:19:28,535 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,33773,1733260738594] 2024-12-03T21:19:28,543 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,33773,1733260738594 already deleted, retry=false 2024-12-03T21:19:28,543 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,33773,1733260738594 expired; onlineServers=0 2024-12-03T21:19:28,543 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,41581,1733260738434' ***** 2024-12-03T21:19:28,543 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:19:28,544 INFO [M:0;101545f66cbd:41581 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:19:28,544 INFO [M:0;101545f66cbd:41581 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:19:28,544 DEBUG [M:0;101545f66cbd:41581 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:19:28,544 DEBUG [M:0;101545f66cbd:41581 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:19:28,544 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260738888 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260738888,5,FailOnTimeoutGroup] 2024-12-03T21:19:28,544 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T21:19:28,544 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260738888 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260738888,5,FailOnTimeoutGroup] 2024-12-03T21:19:28,544 INFO [M:0;101545f66cbd:41581 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:19:28,544 INFO [M:0;101545f66cbd:41581 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:19:28,545 DEBUG [M:0;101545f66cbd:41581 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:19:28,545 INFO [M:0;101545f66cbd:41581 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:19:28,545 INFO [M:0;101545f66cbd:41581 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:19:28,545 INFO [M:0;101545f66cbd:41581 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:19:28,545 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:19:28,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:19:28,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:28,552 DEBUG [M:0;101545f66cbd:41581 {}] zookeeper.ZKUtil(347): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:19:28,552 WARN [M:0;101545f66cbd:41581 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:19:28,552 INFO [M:0;101545f66cbd:41581 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/.lastflushedseqids 2024-12-03T21:19:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741846_1030 (size=120) 2024-12-03T21:19:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741846_1030 (size=120) 2024-12-03T21:19:28,559 INFO [M:0;101545f66cbd:41581 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:19:28,559 INFO [M:0;101545f66cbd:41581 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:19:28,559 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:19:28,559 INFO [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:19:28,559 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:19:28,559 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:19:28,559 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:19:28,559 INFO [M:0;101545f66cbd:41581 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-12-03T21:19:28,560 ERROR [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData-prefix:101545f66cbd,41581,1733260738434 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:28,560 WARN [FSHLog-0-hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData-prefix:101545f66cbd,41581,1733260738434 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:28,560 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 101545f66cbd%2C41581%2C1733260738434:(num 1733260738713) roll requested 2024-12-03T21:19:28,560 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C41581%2C1733260738434.1733260768560 2024-12-03T21:19:28,566 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,566 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,566 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,566 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,566 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,567 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260768560 2024-12-03T21:19:28,567 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T21:19:28,567 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42379,DS-f8da228f-f7c1-402f-b426-5d71fccc0b67,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T21:19:28,567 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 2024-12-03T21:19:28,568 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:37477:37477)] 2024-12-03T21:19:28,568 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 is not closed yet, will try archiving it next time 2024-12-03T21:19:28,568 WARN [IPC Server handler 0 on default port 42369 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-12-03T21:19:28,568 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 after 1ms 2024-12-03T21:19:28,585 DEBUG [M:0;101545f66cbd:41581 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/22f8b738510c4466a472efb256f283ba is 82, key is hbase:meta,,1/info:regioninfo/1733260739573/Put/seqid=0 2024-12-03T21:19:28,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741848_1033 (size=5672) 2024-12-03T21:19:28,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741848_1033 (size=5672) 2024-12-03T21:19:28,590 INFO [M:0;101545f66cbd:41581 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/22f8b738510c4466a472efb256f283ba 2024-12-03T21:19:28,609 DEBUG [M:0;101545f66cbd:41581 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a711ef7707a8448099ea1a4d653356ce is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733260740114/Put/seqid=0 2024-12-03T21:19:28,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741849_1034 (size=6119) 2024-12-03T21:19:28,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741849_1034 (size=6119) 2024-12-03T21:19:28,614 INFO [M:0;101545f66cbd:41581 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a711ef7707a8448099ea1a4d653356ce 2024-12-03T21:19:28,635 DEBUG [M:0;101545f66cbd:41581 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/183afb5a8e0048eeab0a64f48e5200a0 is 69, key is 101545f66cbd,33773,1733260738594/rs:state/1733260738952/Put/seqid=0 2024-12-03T21:19:28,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:19:28,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33773-0x1019e59b1590001, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:19:28,635 INFO [RS:0;101545f66cbd:33773 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:19:28,635 INFO [RS:0;101545f66cbd:33773 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,33773,1733260738594; zookeeper connection closed. 2024-12-03T21:19:28,636 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3115177b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3115177b 2024-12-03T21:19:28,636 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:19:28,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741850_1035 (size=5156) 2024-12-03T21:19:28,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741850_1035 (size=5156) 2024-12-03T21:19:28,640 INFO [M:0;101545f66cbd:41581 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/183afb5a8e0048eeab0a64f48e5200a0 2024-12-03T21:19:28,659 DEBUG [M:0;101545f66cbd:41581 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24652227eb1e41cdbc40b0e92b850830 is 52, key is load_balancer_on/state:d/1733260739730/Put/seqid=0 2024-12-03T21:19:28,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741851_1036 (size=5056) 2024-12-03T21:19:28,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741851_1036 (size=5056) 2024-12-03T21:19:28,663 INFO [M:0;101545f66cbd:41581 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24652227eb1e41cdbc40b0e92b850830 2024-12-03T21:19:28,669 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/22f8b738510c4466a472efb256f283ba as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/22f8b738510c4466a472efb256f283ba 2024-12-03T21:19:28,673 INFO [M:0;101545f66cbd:41581 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/22f8b738510c4466a472efb256f283ba, entries=8, sequenceid=56, filesize=5.5 K 2024-12-03T21:19:28,674 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a711ef7707a8448099ea1a4d653356ce as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a711ef7707a8448099ea1a4d653356ce 2024-12-03T21:19:28,680 INFO [M:0;101545f66cbd:41581 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a711ef7707a8448099ea1a4d653356ce, entries=6, sequenceid=56, filesize=6.0 K 2024-12-03T21:19:28,681 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/183afb5a8e0048eeab0a64f48e5200a0 as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/183afb5a8e0048eeab0a64f48e5200a0 2024-12-03T21:19:28,685 INFO [M:0;101545f66cbd:41581 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/183afb5a8e0048eeab0a64f48e5200a0, entries=1, sequenceid=56, filesize=5.0 K 2024-12-03T21:19:28,686 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24652227eb1e41cdbc40b0e92b850830 as hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24652227eb1e41cdbc40b0e92b850830 2024-12-03T21:19:28,690 INFO [M:0;101545f66cbd:41581 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24652227eb1e41cdbc40b0e92b850830, entries=1, sequenceid=56, filesize=4.9 K 2024-12-03T21:19:28,691 INFO [M:0;101545f66cbd:41581 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false 2024-12-03T21:19:28,693 INFO [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:19:28,693 DEBUG [M:0;101545f66cbd:41581 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260768559Disabling compacts and flushes for region at 1733260768559Disabling writes for close at 1733260768559Obtaining lock to block concurrent updates at 1733260768559Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260768559Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733260768560 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260768568 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260768568Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260768584 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260768584Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260768595 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260768608 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260768609 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260768619 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260768634 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260768634Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260768644 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260768658 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260768658Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3608cb5: reopening flushed file at 1733260768668 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7685d0c6: reopening flushed file at 1733260768674 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@170e8de7: reopening flushed file at 1733260768680 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@372130bd: reopening flushed file at 1733260768685 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false at 1733260768691 (+6 ms)Writing region close event to WAL at 1733260768693 (+2 ms)Closed at 1733260768693 2024-12-03T21:19:28,693 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,693 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,693 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,693 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,694 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:19:28,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42025 is added to blk_1073741847_1031 (size=757) 2024-12-03T21:19:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41453 is added to blk_1073741847_1031 (size=757) 2024-12-03T21:19:28,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:28,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:29,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:29,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:29,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:30,202 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:19:30,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:30,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:30,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:30,896 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T21:19:31,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:31,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:32,569 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 after 4002ms 2024-12-03T21:19:32,569 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/WALs/101545f66cbd,41581,1733260738434/101545f66cbd%2C41581%2C1733260738434.1733260738713 to hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/oldWALs/101545f66cbd%2C41581%2C1733260738434.1733260738713 2024-12-03T21:19:32,572 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/MasterData/oldWALs/101545f66cbd%2C41581%2C1733260738434.1733260738713 to hdfs://localhost:42369/user/jenkins/test-data/a9076177-8e30-5c2b-cc8e-d3e0c6eaee0c/oldWALs/101545f66cbd%2C41581%2C1733260738434.1733260738713$masterlocalwal$ 2024-12-03T21:19:32,572 INFO [M:0;101545f66cbd:41581 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T21:19:32,572 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:19:32,572 INFO [M:0;101545f66cbd:41581 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41581 2024-12-03T21:19:32,573 INFO [M:0;101545f66cbd:41581 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:19:32,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:19:32,735 INFO [M:0;101545f66cbd:41581 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:19:32,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41581-0x1019e59b1590000, quorum=127.0.0.1:49229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:19:32,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27147a8d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:32,737 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2bfa0122{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:32,738 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:32,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b3ddecc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:32,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e047afe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:32,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:19:32,739 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:32,739 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:32,739 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 5bd2cc89-e474-4f0b-89d1-e18cbbd6cbf4) service to localhost/127.0.0.1:42369 2024-12-03T21:19:32,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data3/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:32,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data4/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:32,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:32,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e276c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:32,742 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1386a0c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:32,742 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:32,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7689d41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:32,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@703ef203{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:32,744 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:19:32,744 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:19:32,744 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:19:32,744 WARN [BP-317319251-172.17.0.2-1733260736457 heartbeating to localhost/127.0.0.1:42369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-317319251-172.17.0.2-1733260736457 (Datanode Uuid 438aa9b8-2ca8-41c5-b946-3172aa9e8abc) service to localhost/127.0.0.1:42369 2024-12-03T21:19:32,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data1/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:32,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/cluster_8c45575c-c5e0-2e31-2f83-6ba9e05c8f38/data/data2/current/BP-317319251-172.17.0.2-1733260736457 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:19:32,745 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:19:32,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d7adad9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:19:32,750 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4da18e2b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:19:32,750 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:19:32,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@101bbbad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:19:32,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ea9d584{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir/,STOPPED} 2024-12-03T21:19:32,757 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:19:32,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:19:32,784 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:42369 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter 
Sending Thread for localhost/127.0.0.1:42369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42369 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42369 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42369 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:42369 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=125 (was 157), ProcessCount=11 (was 11), AvailableMemoryMB=1953 (was 2489) 2024-12-03T21:19:32,792 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=125, ProcessCount=11, AvailableMemoryMB=1953 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.log.dir so I do NOT create it in target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81123fd7-2751-cc3f-186d-9fa43f07ec46/hadoop.tmp.dir so I do NOT create it in target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60, deleteOnExit=true 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/test.cache.data in system properties and HBase conf 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:19:32,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:19:32,793 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:19:32,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:19:32,805 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:19:32,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:32,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:33,100 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:33,103 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:33,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:33,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:33,105 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:33,105 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:33,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b50defd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:33,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fb6de9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:33,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@153337d2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/java.io.tmpdir/jetty-localhost-41271-hadoop-hdfs-3_4_1-tests_jar-_-any-1836426337040630044/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:19:33,198 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58d2b6e{HTTP/1.1, (http/1.1)}{localhost:41271} 2024-12-03T21:19:33,198 INFO [Time-limited test {}] server.Server(415): Started @186296ms 2024-12-03T21:19:33,208 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:19:33,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:33,458 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:33,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:33,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:33,459 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:33,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25b87dc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:33,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b82a74b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:33,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58258a79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/java.io.tmpdir/jetty-localhost-35003-hadoop-hdfs-3_4_1-tests_jar-_-any-3777260995980284616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:33,559 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50c8efd9{HTTP/1.1, (http/1.1)}{localhost:35003} 2024-12-03T21:19:33,559 INFO [Time-limited test {}] server.Server(415): Started @186657ms 2024-12-03T21:19:33,560 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:19:33,588 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:19:33,592 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:19:33,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:19:33,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:19:33,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:19:33,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64c5d398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:19:33,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ff0a5a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:19:33,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38e67c7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/java.io.tmpdir/jetty-localhost-35553-hadoop-hdfs-3_4_1-tests_jar-_-any-8781334113878771715/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:19:33,684 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@713ad6b2{HTTP/1.1, (http/1.1)}{localhost:35553} 2024-12-03T21:19:33,684 INFO [Time-limited test {}] server.Server(415): Started @186782ms 2024-12-03T21:19:33,685 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:19:33,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:19:33,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T21:19:33,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T21:19:33,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T21:19:33,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:33,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:34,453 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data2/current/BP-2020687819-172.17.0.2-1733260772815/current, will proceed with Du for space computation calculation, 2024-12-03T21:19:34,453 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data1/current/BP-2020687819-172.17.0.2-1733260772815/current, will proceed with Du for space computation calculation, 2024-12-03T21:19:34,469 WARN [Thread-1601 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:19:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f7a43cc4b751054 with lease ID 0xad8f105215537fb: Processing first storage report for DS-40b67fbf-32cf-4527-99b9-8beb5113fa98 from datanode DatanodeRegistration(127.0.0.1:35661, datanodeUuid=4df981b3-fd43-4671-be13-71814bf6ba74, infoPort=36949, infoSecurePort=0, ipcPort=37905, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815) 2024-12-03T21:19:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f7a43cc4b751054 with lease ID 0xad8f105215537fb: from storage DS-40b67fbf-32cf-4527-99b9-8beb5113fa98 node DatanodeRegistration(127.0.0.1:35661, datanodeUuid=4df981b3-fd43-4671-be13-71814bf6ba74, infoPort=36949, infoSecurePort=0, ipcPort=37905, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f7a43cc4b751054 with lease ID 0xad8f105215537fb: Processing first storage report for DS-1797014f-f451-4a1d-9ff2-d53b7fae57b6 from datanode DatanodeRegistration(127.0.0.1:35661, datanodeUuid=4df981b3-fd43-4671-be13-71814bf6ba74, infoPort=36949, infoSecurePort=0, ipcPort=37905, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815) 2024-12-03T21:19:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f7a43cc4b751054 with lease ID 0xad8f105215537fb: from storage DS-1797014f-f451-4a1d-9ff2-d53b7fae57b6 node DatanodeRegistration(127.0.0.1:35661, datanodeUuid=4df981b3-fd43-4671-be13-71814bf6ba74, infoPort=36949, infoSecurePort=0, ipcPort=37905, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
2024-12-03T21:19:34,731 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data4/current/BP-2020687819-172.17.0.2-1733260772815/current, will proceed with Du for space computation calculation, 2024-12-03T21:19:34,731 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data3/current/BP-2020687819-172.17.0.2-1733260772815/current, will proceed with Du for space computation calculation, 2024-12-03T21:19:34,748 WARN [Thread-1624 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:19:34,750 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2405beb08f5c5393 with lease ID 0xad8f105215537fc: Processing first storage report for DS-ed0a0ad1-ab52-4782-aab6-9cd8d1bc939c from datanode DatanodeRegistration(127.0.0.1:36839, datanodeUuid=9f346f05-c05d-4ef9-8b40-2d2af7ade5ce, infoPort=45959, infoSecurePort=0, ipcPort=40701, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815) 2024-12-03T21:19:34,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2405beb08f5c5393 with lease ID 0xad8f105215537fc: from storage DS-ed0a0ad1-ab52-4782-aab6-9cd8d1bc939c node DatanodeRegistration(127.0.0.1:36839, datanodeUuid=9f346f05-c05d-4ef9-8b40-2d2af7ade5ce, infoPort=45959, infoSecurePort=0, ipcPort=40701, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:34,751 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2405beb08f5c5393 with lease ID 0xad8f105215537fc: Processing first storage report for DS-08b42866-7ceb-46bf-a6c1-971b59f36ca7 from datanode DatanodeRegistration(127.0.0.1:36839, datanodeUuid=9f346f05-c05d-4ef9-8b40-2d2af7ade5ce, infoPort=45959, infoSecurePort=0, ipcPort=40701, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815) 2024-12-03T21:19:34,751 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2405beb08f5c5393 with lease ID 0xad8f105215537fc: from storage DS-08b42866-7ceb-46bf-a6c1-971b59f36ca7 node DatanodeRegistration(127.0.0.1:36839, datanodeUuid=9f346f05-c05d-4ef9-8b40-2d2af7ade5ce, infoPort=45959, infoSecurePort=0, ipcPort=40701, storageInfo=lv=-57;cid=testClusterID;nsid=529061312;c=1733260772815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:19:34,816 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a 2024-12-03T21:19:34,821 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/zookeeper_0, clientPort=51630, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:19:34,823 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51630 2024-12-03T21:19:34,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:34,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:34,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:19:34,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:19:34,837 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c with version=8 2024-12-03T21:19:34,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:19:34,839 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:19:34,839 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:19:34,840 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41221 2024-12-03T21:19:34,841 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41221 connecting to ZooKeeper ensemble=127.0.0.1:51630 2024-12-03T21:19:34,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:34,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:34,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412210x0, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:19:34,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41221-0x1019e5a3f8c0000 connected 2024-12-03T21:19:34,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:34,968 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:34,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:19:34,971 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c, hbase.cluster.distributed=false 2024-12-03T21:19:34,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:19:34,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41221 2024-12-03T21:19:34,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41221 2024-12-03T21:19:34,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41221 2024-12-03T21:19:34,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41221 2024-12-03T21:19:34,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41221 2024-12-03T21:19:34,992 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:19:34,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:19:34,993 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39847 2024-12-03T21:19:34,994 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39847 connecting to ZooKeeper ensemble=127.0.0.1:51630 2024-12-03T21:19:34,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:34,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:35,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398470x0, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:19:35,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398470x0, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:19:35,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39847-0x1019e5a3f8c0001 connected 2024-12-03T21:19:35,008 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:19:35,008 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:19:35,009 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:19:35,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:19:35,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-03T21:19:35,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39847 2024-12-03T21:19:35,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39847 2024-12-03T21:19:35,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-03T21:19:35,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-03T21:19:35,025 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:41221 2024-12-03T21:19:35,025 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:19:35,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:19:35,032 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:19:35,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,041 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:19:35,041 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/101545f66cbd,41221,1733260774839 from backup master directory 2024-12-03T21:19:35,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:19:35,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,049 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:19:35,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:19:35,049 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,054 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/hbase.id] with ID: a8b18494-7818-4a81-acd1-8fb20e722afd 2024-12-03T21:19:35,054 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/.tmp/hbase.id 2024-12-03T21:19:35,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:19:35,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:19:35,061 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/.tmp/hbase.id]:[hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/hbase.id] 2024-12-03T21:19:35,073 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:35,073 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:19:35,076 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
2024-12-03T21:19:35,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:19:35,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:19:35,094 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:19:35,095 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:19:35,095 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:19:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:19:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:19:35,103 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store 2024-12-03T21:19:35,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:19:35,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:19:35,110 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:35,110 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:19:35,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:19:35,110 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:19:35,110 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:19:35,110 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:19:35,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:19:35,111 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260775110Disabling compacts and flushes for region at 1733260775110Disabling writes for close at 1733260775110Writing region close event to WAL at 1733260775110Closed at 1733260775110 2024-12-03T21:19:35,112 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/.initializing 2024-12-03T21:19:35,112 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/WALs/101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,115 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C41221%2C1733260774839, suffix=, logDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/WALs/101545f66cbd,41221,1733260774839, archiveDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/oldWALs, maxLogs=10 2024-12-03T21:19:35,115 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C41221%2C1733260774839.1733260775115 2024-12-03T21:19:35,120 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/WALs/101545f66cbd,41221,1733260774839/101545f66cbd%2C41221%2C1733260774839.1733260775115 2024-12-03T21:19:35,123 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45959:45959),(127.0.0.1/127.0.0.1:36949:36949)] 2024-12-03T21:19:35,127 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:19:35,128 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:35,128 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,128 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:19:35,131 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:35,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:19:35,133 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:19:35,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:19:35,135 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:19:35,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:19:35,136 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:19:35,137 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,138 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,138 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,139 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,139 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,140 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:19:35,142 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:19:35,144 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:19:35,144 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723589, jitterRate=-0.07990984618663788}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:19:35,145 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260775128Initializing all the Stores at 1733260775129 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260775129Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260775129Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260775130 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260775130Cleaning up temporary data from old regions at 1733260775139 (+9 ms)Region opened successfully at 1733260775145 (+6 ms) 2024-12-03T21:19:35,145 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:19:35,149 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41c880c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:19:35,150 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:19:35,150 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:19:35,150 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:19:35,150 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:19:35,151 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:19:35,151 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:19:35,151 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:19:35,153 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:19:35,154 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:19:35,162 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:19:35,163 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:19:35,164 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:19:35,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:19:35,174 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:19:35,175 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:19:35,182 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:19:35,183 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:19:35,198 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:19:35,206 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:19:35,215 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:19:35,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:19:35,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:19:35,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,231 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,41221,1733260774839, sessionid=0x1019e5a3f8c0000, setting cluster-up flag (Was=false) 2024-12-03T21:19:35,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,283 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:19:35,288 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,357 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:19:35,358 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,41221,1733260774839 2024-12-03T21:19:35,359 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:19:35,361 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:19:35,361 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:19:35,361 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:19:35,361 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,41221,1733260774839 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:19:35,363 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:19:35,365 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:19:35,365 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:19:35,367 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,367 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:19:35,367 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260805367 2024-12-03T21:19:35,367 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:19:35,368 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,371 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:19:35,371 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:19:35,372 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:19:35,374 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:19:35,374 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:19:35,374 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260775374,5,FailOnTimeoutGroup] 2024-12-03T21:19:35,374 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260775374,5,FailOnTimeoutGroup] 2024-12-03T21:19:35,374 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,374 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:19:35,374 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,375 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
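Editor's note: the FSTableDescriptors entry at 21:19:35,367 above prints the full hbase:meta schema (families with VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192). As a hedged illustration of what such a descriptor corresponds to in the public HBase client API, here is a minimal sketch; the table name "demo" and the single "info" family are placeholders of mine, since hbase:meta itself is created internally by InitMetaProcedure rather than by client code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family printed by FSTableDescriptors above:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();

    // "demo" is a placeholder table name for illustration only.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}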
2024-12-03T21:19:35,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:19:35,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:19:35,382 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:19:35,382 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c 2024-12-03T21:19:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:19:35,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:19:35,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:35,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:19:35,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:19:35,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:35,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:19:35,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:19:35,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:35,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:19:35,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:19:35,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:35,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:19:35,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:19:35,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:35,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:35,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:19:35,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740 2024-12-03T21:19:35,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740 2024-12-03T21:19:35,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:19:35,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:19:35,403 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
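Editor's note: the CompactionConfiguration(183) lines repeated for every store (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) and the FlushLargeStoresPolicy(65) fallback are driven by ordinary hbase-site.xml properties. A minimal sketch of the corresponding Configuration keys follows, assuming the stock property names apply; the values are copied from the log output above, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values below match what CompactionConfiguration(183) printed above.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setInt("hbase.hstore.compaction.min", 3);    // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);   // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
    // FlushLargeStoresPolicy(65): when this key is unset, the lower bound falls
    // back to the region memstore flush size divided by the number of families.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}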
2024-12-03T21:19:35,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:19:35,406 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:19:35,407 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799283, jitterRate=0.016341790556907654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:19:35,407 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260775391Initializing all the Stores at 1733260775392 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260775392Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260775392Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260775392Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260775392Cleaning up temporary data from old regions at 1733260775402 (+10 ms)Region opened successfully at 1733260775407 (+5 ms) 2024-12-03T21:19:35,407 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:19:35,407 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:19:35,407 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:19:35,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:19:35,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:19:35,408 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:19:35,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260775407Disabling compacts and flushes for region at 1733260775407Disabling writes for close at 1733260775408 (+1 ms)Writing 
region close event to WAL at 1733260775408Closed at 1733260775408 2024-12-03T21:19:35,409 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:19:35,409 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:19:35,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:19:35,411 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:19:35,412 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:19:35,413 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(746): ClusterId : a8b18494-7818-4a81-acd1-8fb20e722afd 2024-12-03T21:19:35,413 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:19:35,424 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:19:35,424 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:19:35,441 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:19:35,441 DEBUG [RS:0;101545f66cbd:39847 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a44d0e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:19:35,455 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:39847 2024-12-03T21:19:35,455 INFO [RS:0;101545f66cbd:39847 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:19:35,455 INFO [RS:0;101545f66cbd:39847 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:19:35,455 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(832): About to register with Master. 
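Editor's note: the "Opened 1588230740; next sequenceid=2; SteppingSplitPolicy{...}" entry above shows which split policy and flush threshold the region came up with (the log also reports a jitterRate alongside desiredMaxFileSize). Assuming the standard configuration keys are in play, here is a hedged sketch of how a split policy and region size limits are normally selected; the sizes used are illustrative, not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitPolicyConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Which RegionSplitPolicy subclass regions use; SteppingSplitPolicy is what
    // the open journals above report for both master:store and hbase:meta.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    // Base region file size that the constant-size policy perturbs with jitter
    // (the log prints the already-jittered desiredMaxFileSize).
    conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);
    // Memstore flush size that FlushLargeStoresPolicy divides by the family count.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.region.split.policy"));
  }
}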
2024-12-03T21:19:35,456 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,41221,1733260774839 with port=39847, startcode=1733260774992 2024-12-03T21:19:35,456 DEBUG [RS:0;101545f66cbd:39847 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:19:35,458 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:19:35,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41221 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41221 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,460 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c 2024-12-03T21:19:35,461 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38247 2024-12-03T21:19:35,461 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:19:35,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:19:35,474 DEBUG [RS:0;101545f66cbd:39847 {}] zookeeper.ZKUtil(111): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,474 WARN [RS:0;101545f66cbd:39847 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:19:35,474 INFO [RS:0;101545f66cbd:39847 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:19:35,474 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,39847,1733260774992] 2024-12-03T21:19:35,478 INFO [RS:0;101545f66cbd:39847 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:19:35,479 INFO [RS:0;101545f66cbd:39847 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:19:35,480 INFO [RS:0;101545f66cbd:39847 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:19:35,480 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
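Editor's note: the regionserver registers itself under /hbase/rs/<host,port,startcode> and the master's RegionServerTracker reacts to the resulting NodeChildrenChanged event; this is the standard ZooKeeper ephemeral-node membership pattern. Below is a stand-alone sketch of that pattern using the plain ZooKeeper client, not HBase's ZKUtil code: the quorum address and the /hbase/rs parent are copied from the log, while the child node name is a made-up placeholder, and the parent znode is assumed to already exist.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // 127.0.0.1:51630 is the mini-cluster quorum shown in the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51630", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // An ephemeral child under /hbase/rs disappears automatically when the
    // session dies, which is how the master notices a regionserver crash.
    // "example-server,39847,0" is a placeholder, not a real server name.
    String path = zk.create("/hbase/rs/example-server,39847,0", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered at " + path);
    zk.close();
  }
}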
2024-12-03T21:19:35,480 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:19:35,481 INFO [RS:0;101545f66cbd:39847 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:19:35,481 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:19:35,481 DEBUG [RS:0;101545f66cbd:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
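Editor's note: the ChoreService lines (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60000 ms, and so on) are periodic tasks on scheduled thread pools. The following is a hedged, JDK-only sketch of that pattern, not HBase's own ChoreService or ScheduledChore classes; the 1000 ms period is taken from the log, and the task body is a placeholder.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSchedulingSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
    // Analogous to "ScheduledChore name=CompactionChecker, period=1000,
    // unit=MILLISECONDS is enabled": run a check at a fixed rate.
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("compaction check placeholder"),
        0, 1000, TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(3);  // let a few iterations run, then shut down
    chorePool.shutdownNow();
  }
}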
2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,482 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39847,1733260774992-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:19:35,495 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:19:35,495 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,39847,1733260774992-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,495 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,495 INFO [RS:0;101545f66cbd:39847 {}] regionserver.Replication(171): 101545f66cbd,39847,1733260774992 started 2024-12-03T21:19:35,508 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:35,508 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,39847,1733260774992, RpcServer on 101545f66cbd/172.17.0.2:39847, sessionid=0x1019e5a3f8c0001 2024-12-03T21:19:35,508 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:19:35,508 DEBUG [RS:0;101545f66cbd:39847 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,508 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39847,1733260774992' 2024-12-03T21:19:35,508 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:19:35,508 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:19:35,509 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:19:35,509 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:19:35,509 DEBUG [RS:0;101545f66cbd:39847 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,509 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,39847,1733260774992' 2024-12-03T21:19:35,509 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:19:35,509 DEBUG 
[RS:0;101545f66cbd:39847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:19:35,510 DEBUG [RS:0;101545f66cbd:39847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:19:35,510 INFO [RS:0;101545f66cbd:39847 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:19:35,510 INFO [RS:0;101545f66cbd:39847 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:19:35,563 WARN [101545f66cbd:41221 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T21:19:35,612 INFO [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C39847%2C1733260774992, suffix=, logDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992, archiveDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs, maxLogs=32 2024-12-03T21:19:35,612 INFO [RS:0;101545f66cbd:39847 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39847%2C1733260774992.1733260775612 2024-12-03T21:19:35,621 INFO [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260775612 2024-12-03T21:19:35,625 DEBUG [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36949:36949),(127.0.0.1/127.0.0.1:45959:45959)] 2024-12-03T21:19:35,813 DEBUG [101545f66cbd:41221 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:19:35,814 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,39847,1733260774992 2024-12-03T21:19:35,817 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,39847,1733260774992, state=OPENING 2024-12-03T21:19:35,824 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:19:35,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:19:35,834 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:19:35,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,39847,1733260774992}] 2024-12-03T21:19:35,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-12-03T21:19:35,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:19:35,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:35,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:35,988 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:19:35,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60711, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:19:35,996 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:19:35,996 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:19:35,999 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C39847%2C1733260774992.meta, suffix=.meta, logDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992, archiveDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs, maxLogs=32 2024-12-03T21:19:36,000 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39847%2C1733260774992.meta.1733260775999.meta 2024-12-03T21:19:36,007 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.meta.1733260775999.meta 2024-12-03T21:19:36,008 DEBUG 
[RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45959:45959),(127.0.0.1/127.0.0.1:36949:36949)] 2024-12-03T21:19:36,010 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:19:36,010 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:19:36,010 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:19:36,010 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T21:19:36,010 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:19:36,010 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:36,011 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:19:36,011 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:19:36,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:19:36,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:19:36,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:36,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:19:36,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:19:36,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:36,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:19:36,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:19:36,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:36,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:19:36,018 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:19:36,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:19:36,019 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:19:36,019 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740 2024-12-03T21:19:36,020 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740 2024-12-03T21:19:36,022 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:19:36,022 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:19:36,022 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T21:19:36,023 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:19:36,024 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=869001, jitterRate=0.10499212145805359}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:19:36,024 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:19:36,025 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260776011Writing region info on filesystem at 1733260776011Initializing all the Stores at 1733260776012 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260776012Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260776013 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260776013Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260776013Cleaning up temporary data from old regions at 1733260776022 (+9 ms)Running coprocessor post-open hooks at 1733260776024 (+2 ms)Region opened successfully at 1733260776025 (+1 ms) 2024-12-03T21:19:36,026 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260775988 2024-12-03T21:19:36,028 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:19:36,028 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:19:36,029 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=101545f66cbd,39847,1733260774992 2024-12-03T21:19:36,030 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,39847,1733260774992, state=OPEN 2024-12-03T21:19:36,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:19:36,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:19:36,086 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,39847,1733260774992 2024-12-03T21:19:36,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:19:36,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:19:36,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:19:36,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,39847,1733260774992 in 252 msec 2024-12-03T21:19:36,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:19:36,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 680 msec 2024-12-03T21:19:36,096 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:19:36,096 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:19:36,098 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:19:36,098 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,39847,1733260774992, seqNum=-1] 2024-12-03T21:19:36,098 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:19:36,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33239, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:19:36,107 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 745 msec 2024-12-03T21:19:36,107 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260776107, completionTime=-1 2024-12-03T21:19:36,107 INFO 
[master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:19:36,107 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:19:36,109 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:19:36,109 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260836109 2024-12-03T21:19:36,109 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260896109 2024-12-03T21:19:36,109 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:41221, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,110 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,112 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.064sec 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:19:36,113 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:19:36,116 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:19:36,116 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:19:36,116 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,41221,1733260774839-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:19:36,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@84084fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:19:36,214 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,41221,-1 for getting cluster id 2024-12-03T21:19:36,214 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:19:36,217 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a8b18494-7818-4a81-acd1-8fb20e722afd' 2024-12-03T21:19:36,217 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:19:36,217 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a8b18494-7818-4a81-acd1-8fb20e722afd" 2024-12-03T21:19:36,217 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@396bf085, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:19:36,217 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,41221,-1] 2024-12-03T21:19:36,218 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:19:36,218 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:19:36,219 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:19:36,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78196443, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:19:36,222 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:19:36,224 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,39847,1733260774992, seqNum=-1] 2024-12-03T21:19:36,224 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:19:36,225 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52074, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:19:36,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,41221,1733260774839 2024-12-03T21:19:36,228 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:19:36,231 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:19:36,232 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T21:19:36,232 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 101545f66cbd,41221,1733260774839 2024-12-03T21:19:36,233 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ac02d0a 2024-12-03T21:19:36,233 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T21:19:36,234 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36148, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T21:19:36,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T21:19:36,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-03T21:19:36,234 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:19:36,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:36,237 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T21:19:36,237 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-03T21:19:36,238 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T21:19:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:19:36,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741835_1011 (size=405) 2024-12-03T21:19:36,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741835_1011 (size=405) 2024-12-03T21:19:36,246 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 293cfd4133ce8faac7938c16338c6df4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c 2024-12-03T21:19:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741836_1012 (size=88) 2024-12-03T21:19:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36839 is added to blk_1073741836_1012 (size=88) 2024-12-03T21:19:36,252 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:36,252 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 293cfd4133ce8faac7938c16338c6df4, disabling compactions & flushes 2024-12-03T21:19:36,252 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,252 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,252 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. after waiting 0 ms 2024-12-03T21:19:36,252 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,253 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,253 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 293cfd4133ce8faac7938c16338c6df4: Waiting for close lock at 1733260776252Disabling compacts and flushes for region at 1733260776252Disabling writes for close at 1733260776252Writing region close event to WAL at 1733260776252Closed at 1733260776252 2024-12-03T21:19:36,254 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T21:19:36,254 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733260776254"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260776254"}]},"ts":"1733260776254"} 2024-12-03T21:19:36,256 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T21:19:36,257 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T21:19:36,257 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260776257"}]},"ts":"1733260776257"} 2024-12-03T21:19:36,259 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-03T21:19:36,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=293cfd4133ce8faac7938c16338c6df4, ASSIGN}] 2024-12-03T21:19:36,260 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=293cfd4133ce8faac7938c16338c6df4, ASSIGN 2024-12-03T21:19:36,261 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=293cfd4133ce8faac7938c16338c6df4, ASSIGN; state=OFFLINE, location=101545f66cbd,39847,1733260774992; forceNewPlan=false, retain=false 2024-12-03T21:19:36,412 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=293cfd4133ce8faac7938c16338c6df4, regionState=OPENING, regionLocation=101545f66cbd,39847,1733260774992 2024-12-03T21:19:36,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=293cfd4133ce8faac7938c16338c6df4, ASSIGN because future has completed 2024-12-03T21:19:36,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 293cfd4133ce8faac7938c16338c6df4, server=101545f66cbd,39847,1733260774992}] 2024-12-03T21:19:36,575 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:19:36,576 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 293cfd4133ce8faac7938c16338c6df4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:19:36,576 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,576 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:19:36,576 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,576 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,579 INFO [StoreOpener-293cfd4133ce8faac7938c16338c6df4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,582 INFO [StoreOpener-293cfd4133ce8faac7938c16338c6df4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 293cfd4133ce8faac7938c16338c6df4 columnFamilyName info 2024-12-03T21:19:36,582 DEBUG [StoreOpener-293cfd4133ce8faac7938c16338c6df4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:19:36,583 INFO [StoreOpener-293cfd4133ce8faac7938c16338c6df4-1 {}] regionserver.HStore(327): Store=293cfd4133ce8faac7938c16338c6df4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:19:36,583 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,585 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,585 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,586 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,586 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,588 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,590 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:19:36,591 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 293cfd4133ce8faac7938c16338c6df4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867502, jitterRate=0.10308602452278137}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:19:36,591 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:19:36,592 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 293cfd4133ce8faac7938c16338c6df4: Running coprocessor pre-open hook at 1733260776577Writing region info on filesystem at 1733260776577Initializing all the Stores at 1733260776578 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260776578Cleaning up temporary data from old regions at 1733260776586 (+8 ms)Running coprocessor post-open hooks at 1733260776591 (+5 ms)Region opened successfully at 1733260776592 (+1 ms) 2024-12-03T21:19:36,593 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4., pid=6, masterSystemTime=1733260776570 2024-12-03T21:19:36,595 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,596 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:36,597 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=293cfd4133ce8faac7938c16338c6df4, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,39847,1733260774992 2024-12-03T21:19:36,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 293cfd4133ce8faac7938c16338c6df4, server=101545f66cbd,39847,1733260774992 because future has completed 2024-12-03T21:19:36,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T21:19:36,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 293cfd4133ce8faac7938c16338c6df4, server=101545f66cbd,39847,1733260774992 in 185 msec 2024-12-03T21:19:36,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T21:19:36,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=293cfd4133ce8faac7938c16338c6df4, ASSIGN in 345 msec 2024-12-03T21:19:36,608 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T21:19:36,609 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260776608"}]},"ts":"1733260776608"} 2024-12-03T21:19:36,611 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-03T21:19:36,612 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T21:19:36,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 377 msec 2024-12-03T21:19:36,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:36,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:37,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:37,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:38,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:38,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:39,210 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:19:39,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:19:39,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:39,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:40,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:40,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:41,478 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T21:19:41,478 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-03T21:19:41,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:41,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:42,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:42,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:43,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T21:19:43,707 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T21:19:43,709 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:19:43,709 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T21:19:43,709 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T21:19:43,709 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T21:19:43,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:43,710 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T21:19:43,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:43,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:44,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:44,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:45,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:45,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:46,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:19:46,312 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-03T21:19:46,313 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T21:19:46,316 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:46,316 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:19:46,321 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4., hostname=101545f66cbd,39847,1733260774992, seqNum=2] 2024-12-03T21:19:46,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:46,345 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T21:19:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T21:19:46,349 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T21:19:46,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T21:19:46,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T21:19:46,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:19:46,515 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 293cfd4133ce8faac7938c16338c6df4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T21:19:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/7985aa057b614590a602906fb80de7e6 is 1080, key is row0001/info:/1733260786323/Put/seqid=0 2024-12-03T21:19:46,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741837_1013 (size=6033) 2024-12-03T21:19:46,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741837_1013 (size=6033) 2024-12-03T21:19:46,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:46,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:46,970 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/7985aa057b614590a602906fb80de7e6 2024-12-03T21:19:46,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/7985aa057b614590a602906fb80de7e6 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6 2024-12-03T21:19:46,993 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6, entries=1, sequenceid=5, filesize=5.9 K 2024-12-03T21:19:46,995 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 479ms, sequenceid=5, compaction requested=false 2024-12-03T21:19:46,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 293cfd4133ce8faac7938c16338c6df4: 2024-12-03T21:19:46,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:19:46,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T21:19:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T21:19:47,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T21:19:47,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 649 msec 2024-12-03T21:19:47,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 670 msec 2024-12-03T21:19:47,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:47,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:48,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:48,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:49,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:49,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:50,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:50,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:51,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:51,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:52,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:52,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:53,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:53,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:54,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:54,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:55,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:55,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T21:19:56,445 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T21:19:56,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:56,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:19:56,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-03T21:19:56,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T21:19:56,452 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T21:19:56,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T21:19:56,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-03T21:19:56,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:19:56,606 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 293cfd4133ce8faac7938c16338c6df4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T21:19:56,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/5059f79bc7f44897a9a534100dd726f1 is 1080, key is row0002/info:/1733260796446/Put/seqid=0 2024-12-03T21:19:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741838_1014 (size=6033) 2024-12-03T21:19:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741838_1014 (size=6033) 2024-12-03T21:19:56,626 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/5059f79bc7f44897a9a534100dd726f1 2024-12-03T21:19:56,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/5059f79bc7f44897a9a534100dd726f1 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1 2024-12-03T21:19:56,641 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1, entries=1, sequenceid=9, filesize=5.9 K 2024-12-03T21:19:56,644 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 38ms, sequenceid=9, compaction requested=false 2024-12-03T21:19:56,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 293cfd4133ce8faac7938c16338c6df4: 2024-12-03T21:19:56,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:19:56,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-03T21:19:56,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-03T21:19:56,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T21:19:56,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 197 msec 2024-12-03T21:19:56,656 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 205 msec 2024-12-03T21:19:56,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:56,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:57,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:57,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:58,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:58,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 after 68057ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:58,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:19:58,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T21:19:59,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:19:59,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:00,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:00,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:01,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:01,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:02,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:02,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:03,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:03,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:04,816 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T21:20:04,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:04,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:05,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:05,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:06,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-03T21:20:06,536 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T21:20:06,543 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39847%2C1733260774992.1733260806543 2024-12-03T21:20:06,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:06,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:06,552 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:06,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:06,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:06,552 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260775612 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260806543 2024-12-03T21:20:06,554 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36949:36949),(127.0.0.1/127.0.0.1:45959:45959)] 2024-12-03T21:20:06,554 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260775612 is not closed yet, will try archiving it next time 2024-12-03T21:20:06,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:20:06,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741833_1009 (size=5546) 2024-12-03T21:20:06,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741833_1009 (size=5546) 2024-12-03T21:20:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:20:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-03T21:20:06,558 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T21:20:06,559 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T21:20:06,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T21:20:06,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-03T21:20:06,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:20:06,713 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 293cfd4133ce8faac7938c16338c6df4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T21:20:06,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/0819216a3aed462082a4007812d2eb98 is 1080, key is row0003/info:/1733260806539/Put/seqid=0 2024-12-03T21:20:06,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741840_1016 (size=6033) 2024-12-03T21:20:06,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741840_1016 (size=6033) 2024-12-03T21:20:06,724 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/0819216a3aed462082a4007812d2eb98 2024-12-03T21:20:06,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/0819216a3aed462082a4007812d2eb98 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98 2024-12-03T21:20:06,735 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98, entries=1, sequenceid=13, filesize=5.9 K 2024-12-03T21:20:06,736 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 23ms, sequenceid=13, compaction requested=true 2024-12-03T21:20:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 293cfd4133ce8faac7938c16338c6df4: 2024-12-03T21:20:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:20:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-03T21:20:06,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-03T21:20:06,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-03T21:20:06,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-03T21:20:06,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-12-03T21:20:06,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:06,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:07,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:07,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:08,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:08,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:09,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:09,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:10,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:10,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:11,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:11,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:12,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:12,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:13,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:13,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:14,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:14,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:15,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:15,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T21:20:16,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-03T21:20:16,596 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T21:20:16,597 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T21:20:16,599 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T21:20:16,600 DEBUG [Time-limited test {}] regionserver.HStore(1541): 293cfd4133ce8faac7938c16338c6df4/info is initiating minor compaction (all files)
2024-12-03T21:20:16,600 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T21:20:16,600 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T21:20:16,600 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 293cfd4133ce8faac7938c16338c6df4/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.
2024-12-03T21:20:16,600 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98] into tmpdir=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp, totalSize=17.7 K
2024-12-03T21:20:16,602 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7985aa057b614590a602906fb80de7e6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733260786323
2024-12-03T21:20:16,603 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5059f79bc7f44897a9a534100dd726f1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733260796446
2024-12-03T21:20:16,604 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0819216a3aed462082a4007812d2eb98, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733260806539
2024-12-03T21:20:16,618 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 293cfd4133ce8faac7938c16338c6df4#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T21:20:16,619 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/017b832fc2d64f75965e53c54bb1ca9e is 1080, key is row0001/info:/1733260786323/Put/seqid=0
2024-12-03T21:20:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741841_1017 (size=8296)
2024-12-03T21:20:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741841_1017 (size=8296)
2024-12-03T21:20:16,632 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/017b832fc2d64f75965e53c54bb1ca9e as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/017b832fc2d64f75965e53c54bb1ca9e
2024-12-03T21:20:16,638 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 293cfd4133ce8faac7938c16338c6df4/info of 293cfd4133ce8faac7938c16338c6df4 into 017b832fc2d64f75965e53c54bb1ca9e(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T21:20:16,638 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 293cfd4133ce8faac7938c16338c6df4:
2024-12-03T21:20:16,641 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39847%2C1733260774992.1733260816641
2024-12-03T21:20:16,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:16,647 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:16,647 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:16,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:16,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:16,647 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260806543 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260816641
2024-12-03T21:20:16,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45959:45959),(127.0.0.1/127.0.0.1:36949:36949)]
2024-12-03T21:20:16,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260806543 is not closed yet, will try archiving it next time
2024-12-03T21:20:16,649 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260775612 to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs/101545f66cbd%2C39847%2C1733260774992.1733260775612
2024-12-03T21:20:16,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741839_1015 (size=2520)
2024-12-03T21:20:16,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741839_1015 (size=2520)
2024-12-03T21:20:16,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T21:20:16,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T21:20:16,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-03T21:20:16,652 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T21:20:16,653 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T21:20:16,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T21:20:16,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-03T21:20:16,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.
2024-12-03T21:20:16,809 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 293cfd4133ce8faac7938c16338c6df4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T21:20:16,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/d1532e70a9504c01b508fad8952133c6 is 1080, key is row0000/info:/1733260816639/Put/seqid=0
2024-12-03T21:20:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741843_1019 (size=6033)
2024-12-03T21:20:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741843_1019 (size=6033)
2024-12-03T21:20:16,823 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/d1532e70a9504c01b508fad8952133c6
2024-12-03T21:20:16,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/d1532e70a9504c01b508fad8952133c6 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/d1532e70a9504c01b508fad8952133c6
2024-12-03T21:20:16,831 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T21:20:16,831 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-03T21:20:16,834 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/d1532e70a9504c01b508fad8952133c6, entries=1, sequenceid=18, filesize=5.9 K
2024-12-03T21:20:16,835 INFO [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 26ms, sequenceid=18, compaction requested=false
2024-12-03T21:20:16,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 293cfd4133ce8faac7938c16338c6df4:
2024-12-03T21:20:16,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.
2024-12-03T21:20:16,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-03T21:20:16,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-03T21:20:16,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-03T21:20:16,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec
2024-12-03T21:20:16,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec
2024-12-03T21:20:16,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:16,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:17,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:17,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:18,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:18,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:19,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:19,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:20,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:20,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:21,576 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 293cfd4133ce8faac7938c16338c6df4, had cached 0 bytes from a total of 14329 2024-12-03T21:20:21,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:21,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:22,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:22,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:23,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:23,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:24,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:24,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:25,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:25,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41221 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-03T21:20:26,725 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T21:20:26,728 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C39847%2C1733260774992.1733260826727 2024-12-03T21:20:26,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:26,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:26,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:26,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:26,734 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:26,735 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260816641 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260826727 2024-12-03T21:20:26,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741842_1018 (size=2026) 2024-12-03T21:20:26,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741842_1018 (size=2026) 2024-12-03T21:20:26,739 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36949:36949),(127.0.0.1/127.0.0.1:45959:45959)] 2024-12-03T21:20:26,740 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/WALs/101545f66cbd,39847,1733260774992/101545f66cbd%2C39847%2C1733260774992.1733260806543 to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs/101545f66cbd%2C39847%2C1733260774992.1733260806543 2024-12-03T21:20:26,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:20:26,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
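The repeated "Failed invocation ... Filesystem closed" warnings above come from WAL lease recovery: RecoverLeaseFSUtils.recoverDFSFileLease() asks the NameNode to recover the lease on the old WAL file and then polls DFSClient.isFileClosed() (via reflection, hence the InvocationTargetException wrapper) about once per second until the file is reported closed. In this run the test has already shut down the DFSClient, so every probe fails with IOException("Filesystem closed") and the loop keeps logging and retrying. The sketch below is a rough, illustrative rendering of that retry loop using the public DistributedFileSystem API directly; it is not the HBase implementation, and the class and method names are made up for the example.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative only: roughly what the RecoverLeaseFSUtils retry loop does,
// minus the reflection, progress reporting, and HBase-specific pacing.
public class LeaseRecoverySketch {
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);       // ask the NameNode to start lease recovery
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                           // the log above shows roughly 1s between probes
      try {
        recovered = dfs.isFileClosed(wal);           // the probe that keeps failing in the log
      } catch (java.io.IOException e) {
        // "Filesystem closed": the DFSClient was already shut down, so the probe
        // cannot succeed; HBase logs the warning and tries again until it times out.
      }
    }
    return recovered;
  }
}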
2024-12-03T21:20:26,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:20:26,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:20:26,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:20:26,740 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T21:20:26,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:20:26,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1197060721, stopped=false 2024-12-03T21:20:26,740 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,41221,1733260774839 2024-12-03T21:20:26,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:20:26,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:20:26,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:26,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:26,776 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:20:26,777 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:20:26,777 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:20:26,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:20:26,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:20:26,777 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,39847,1733260774992' ***** 2024-12-03T21:20:26,777 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:20:26,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:20:26,778 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(3091): Received CLOSE for 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,39847,1733260774992 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:20:26,778 INFO [RS:0;101545f66cbd:39847 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:39847. 
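The ZooKeeper events above (NodeDeleted on /hbase/running) are how the shutdown request reaches the region server: the master deletes the /hbase/running znode, each server's ZKWatcher fires, and the server transitions to "STOPPED: Shutdown requested". The "Set watcher on znode that does not yet exist" lines are the watchers being re-registered after the deletion. Below is a minimal, illustrative sketch of watching that znode with the plain ZooKeeper client rather than HBase's ZKWatcher; the quorum address and znode path are taken from the log, everything else is assumed for the example.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: watch the cluster "running" marker described in the log above.
public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51630", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        // Region servers react to exactly this event by starting their shutdown sequence.
        System.out.println("/hbase/running deleted -> cluster shutdown requested");
      }
    });
    // exists() registers the watch whether or not the znode is currently present,
    // which is why the log can "Set watcher on znode that does not yet exist".
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000);   // keep the session alive long enough to observe the event
    zk.close();
  }
}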
2024-12-03T21:20:26,779 DEBUG [RS:0;101545f66cbd:39847 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 293cfd4133ce8faac7938c16338c6df4, disabling compactions & flushes 2024-12-03T21:20:26,779 DEBUG [RS:0;101545f66cbd:39847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:20:26,779 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:20:26,779 INFO [RS:0;101545f66cbd:39847 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:20:26,779 INFO [RS:0;101545f66cbd:39847 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:20:26,779 INFO [RS:0;101545f66cbd:39847 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. after waiting 0 ms 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:20:26,779 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:20:26,779 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 293cfd4133ce8faac7938c16338c6df4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T21:20:26,779 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T21:20:26,779 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 293cfd4133ce8faac7938c16338c6df4=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.} 2024-12-03T21:20:26,779 DEBUG [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 293cfd4133ce8faac7938c16338c6df4 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:20:26,779 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:20:26,779 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:20:26,780 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-03T21:20:26,784 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/1db27f291757407fb5ea5ef288489dbf is 1080, key is row0001/info:/1733260826726/Put/seqid=0 2024-12-03T21:20:26,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741845_1021 (size=6033) 2024-12-03T21:20:26,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741845_1021 (size=6033) 2024-12-03T21:20:26,790 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/1db27f291757407fb5ea5ef288489dbf 2024-12-03T21:20:26,796 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/.tmp/info/1db27f291757407fb5ea5ef288489dbf as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/1db27f291757407fb5ea5ef288489dbf 2024-12-03T21:20:26,798 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/info/e9420ff5f8924db394b3f537907cb8e3 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4./info:regioninfo/1733260776596/Put/seqid=0 2024-12-03T21:20:26,802 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/1db27f291757407fb5ea5ef288489dbf, entries=1, sequenceid=22, filesize=5.9 K 2024-12-03T21:20:26,803 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 24ms, sequenceid=22, compaction requested=true 2024-12-03T21:20:26,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741846_1022 (size=7308) 2024-12-03T21:20:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741846_1022 (size=7308) 2024-12-03T21:20:26,805 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/info/e9420ff5f8924db394b3f537907cb8e3 2024-12-03T21:20:26,807 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98] to archive 2024-12-03T21:20:26,808 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
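The close sequence above shows the two storefile movements this test exercises: the final memstore flush writes a temporary HFile under .tmp/ and commits it into the store's info/ directory, and the store files left over from an earlier compaction are moved under archive/ when the store closes. From the test's side this is driven through the Admin API; the sketch below is a hedged illustration of that client-side sequence, not the actual AbstractTestLogRolling code, and it assumes a reachable cluster configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: the Admin-level operations behind the flush/compaction
// records in the log; a real test would use the mini-cluster's configuration.
public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml points at the cluster
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);         // writes a new HFile into the store's info/ directory, as seen above
      admin.majorCompact(table);  // rewrites the store; superseded HFiles become
                                  // "compacted files" and are archived when the store closes
    }
  }
}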
2024-12-03T21:20:26,810 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6 to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/7985aa057b614590a602906fb80de7e6 2024-12-03T21:20:26,811 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1 to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/5059f79bc7f44897a9a534100dd726f1 2024-12-03T21:20:26,812 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98 to hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/info/0819216a3aed462082a4007812d2eb98 2024-12-03T21:20:26,813 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=101545f66cbd:41221 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-03T21:20:26,813 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7985aa057b614590a602906fb80de7e6=6033, 5059f79bc7f44897a9a534100dd726f1=6033, 0819216a3aed462082a4007812d2eb98=6033] 2024-12-03T21:20:26,817 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/293cfd4133ce8faac7938c16338c6df4/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-03T21:20:26,817 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 2024-12-03T21:20:26,817 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 293cfd4133ce8faac7938c16338c6df4: Waiting for close lock at 1733260826779Running coprocessor pre-close hooks at 1733260826779Disabling compacts and flushes for region at 1733260826779Disabling writes for close at 1733260826779Obtaining lock to block concurrent updates at 1733260826779Preparing flush snapshotting stores in 293cfd4133ce8faac7938c16338c6df4 at 1733260826779Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733260826779Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. at 1733260826780 (+1 ms)Flushing 293cfd4133ce8faac7938c16338c6df4/info: creating writer at 1733260826780Flushing 293cfd4133ce8faac7938c16338c6df4/info: appending metadata at 1733260826784 (+4 ms)Flushing 293cfd4133ce8faac7938c16338c6df4/info: closing flushed file at 1733260826784Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c59d169: reopening flushed file at 1733260826795 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 293cfd4133ce8faac7938c16338c6df4 in 24ms, sequenceid=22, compaction requested=true at 1733260826803 (+8 ms)Writing region close event to WAL at 1733260826813 (+10 ms)Running coprocessor post-close hooks at 1733260826817 (+4 ms)Closed at 1733260826817 2024-12-03T21:20:26,817 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733260776234.293cfd4133ce8faac7938c16338c6df4. 
2024-12-03T21:20:26,823 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/ns/50ad0ca02a07498b93af0a87ab6cce08 is 43, key is default/ns:d/1733260776101/Put/seqid=0 2024-12-03T21:20:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741847_1023 (size=5153) 2024-12-03T21:20:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741847_1023 (size=5153) 2024-12-03T21:20:26,828 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/ns/50ad0ca02a07498b93af0a87ab6cce08 2024-12-03T21:20:26,845 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/table/7b35d0d6f83e45c682534b48e0f530a9 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733260776608/Put/seqid=0 2024-12-03T21:20:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741848_1024 (size=5508) 2024-12-03T21:20:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741848_1024 (size=5508) 2024-12-03T21:20:26,850 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/table/7b35d0d6f83e45c682534b48e0f530a9 2024-12-03T21:20:26,856 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/info/e9420ff5f8924db394b3f537907cb8e3 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/info/e9420ff5f8924db394b3f537907cb8e3 2024-12-03T21:20:26,862 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/info/e9420ff5f8924db394b3f537907cb8e3, entries=10, sequenceid=11, filesize=7.1 K 2024-12-03T21:20:26,863 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/ns/50ad0ca02a07498b93af0a87ab6cce08 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/ns/50ad0ca02a07498b93af0a87ab6cce08 2024-12-03T21:20:26,868 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/ns/50ad0ca02a07498b93af0a87ab6cce08, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T21:20:26,869 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/.tmp/table/7b35d0d6f83e45c682534b48e0f530a9 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/table/7b35d0d6f83e45c682534b48e0f530a9 2024-12-03T21:20:26,873 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/table/7b35d0d6f83e45c682534b48e0f530a9, entries=2, sequenceid=11, filesize=5.4 K 2024-12-03T21:20:26,874 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false 2024-12-03T21:20:26,878 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T21:20:26,878 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:20:26,878 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:20:26,879 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260826779Running coprocessor pre-close hooks at 1733260826779Disabling compacts and flushes for region at 1733260826779Disabling writes for close at 1733260826779Obtaining lock to block concurrent updates at 1733260826780 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733260826780Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733260826780Flushing stores of hbase:meta,,1.1588230740 at 1733260826781 (+1 ms)Flushing 1588230740/info: creating writer at 1733260826781Flushing 1588230740/info: appending metadata at 1733260826797 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733260826797Flushing 1588230740/ns: creating writer at 1733260826810 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733260826823 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733260826823Flushing 1588230740/table: creating writer at 1733260826833 (+10 ms)Flushing 1588230740/table: appending metadata at 1733260826845 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733260826845Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fb4fa6f: reopening flushed file at 1733260826855 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fb17a6f: reopening flushed file at 1733260826862 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e0111b2: reopening flushed file at 1733260826868 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false at 1733260826874 (+6 ms)Writing region close event to WAL at 1733260826875 (+1 ms)Running coprocessor post-close hooks at 1733260826878 (+3 ms)Closed at 1733260826878 2024-12-03T21:20:26,879 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:20:26,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:26,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:26,979 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,39847,1733260774992; all regions closed. 
2024-12-03T21:20:26,980 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,980 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,981 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,981 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,983 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741834_1010 (size=3306)
2024-12-03T21:20:26,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741834_1010 (size=3306)
2024-12-03T21:20:26,990 DEBUG [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs
2024-12-03T21:20:26,990 INFO [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C39847%2C1733260774992.meta:.meta(num 1733260775999)
2024-12-03T21:20:26,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T21:20:26,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741844_1020 (size=1252)
2024-12-03T21:20:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741844_1020 (size=1252)
2024-12-03T21:20:26,995 DEBUG [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/oldWALs
2024-12-03T21:20:26,995 INFO [RS:0;101545f66cbd:39847 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C39847%2C1733260774992:(num 1733260826727)
2024-12-03T21:20:26,995 DEBUG [RS:0;101545f66cbd:39847 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T21:20:26,995 INFO [RS:0;101545f66cbd:39847 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T21:20:26,996 INFO [RS:0;101545f66cbd:39847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-03T21:20:26,996 INFO [RS:0;101545f66cbd:39847 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-03T21:20:26,996 INFO [RS:0;101545f66cbd:39847 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-03T21:20:26,996 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-03T21:20:26,996 INFO [RS:0;101545f66cbd:39847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39847
2024-12-03T21:20:27,004 INFO [RS:0;101545f66cbd:39847 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T21:20:27,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T21:20:27,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,39847,1733260774992
2024-12-03T21:20:27,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,39847,1733260774992]
2024-12-03T21:20:27,021 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,39847,1733260774992 already deleted, retry=false
2024-12-03T21:20:27,021 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,39847,1733260774992 expired; onlineServers=0
2024-12-03T21:20:27,021 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,41221,1733260774839' *****
2024-12-03T21:20:27,021 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T21:20:27,021 INFO [M:0;101545f66cbd:41221 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-03T21:20:27,021 INFO [M:0;101545f66cbd:41221 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-03T21:20:27,021 DEBUG [M:0;101545f66cbd:41221 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T21:20:27,021 DEBUG [M:0;101545f66cbd:41221 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T21:20:27,021 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-03T21:20:27,021 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260775374 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260775374,5,FailOnTimeoutGroup] 2024-12-03T21:20:27,021 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260775374 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260775374,5,FailOnTimeoutGroup] 2024-12-03T21:20:27,021 INFO [M:0;101545f66cbd:41221 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:20:27,022 INFO [M:0;101545f66cbd:41221 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:20:27,022 DEBUG [M:0;101545f66cbd:41221 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:20:27,022 INFO [M:0;101545f66cbd:41221 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:20:27,022 INFO [M:0;101545f66cbd:41221 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:20:27,022 INFO [M:0;101545f66cbd:41221 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:20:27,022 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:20:27,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:20:27,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:27,029 DEBUG [M:0;101545f66cbd:41221 {}] zookeeper.ZKUtil(347): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:20:27,029 WARN [M:0;101545f66cbd:41221 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:20:27,030 INFO [M:0;101545f66cbd:41221 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/.lastflushedseqids 2024-12-03T21:20:27,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741849_1025 (size=130) 2024-12-03T21:20:27,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741849_1025 (size=130) 2024-12-03T21:20:27,040 INFO [M:0;101545f66cbd:41221 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:20:27,040 INFO [M:0;101545f66cbd:41221 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:20:27,040 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:20:27,040 INFO [M:0;101545f66cbd:41221 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:27,040 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:27,040 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:20:27,040 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:27,041 INFO [M:0;101545f66cbd:41221 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-12-03T21:20:27,058 DEBUG [M:0;101545f66cbd:41221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b22e9fad9a244ca9aa1b647ae677ffc9 is 82, key is hbase:meta,,1/info:regioninfo/1733260776029/Put/seqid=0 2024-12-03T21:20:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741850_1026 (size=5672) 2024-12-03T21:20:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741850_1026 (size=5672) 2024-12-03T21:20:27,063 INFO [M:0;101545f66cbd:41221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b22e9fad9a244ca9aa1b647ae677ffc9 2024-12-03T21:20:27,082 DEBUG [M:0;101545f66cbd:41221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/441793c2087b424e9848e95ae28e7ca4 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733260776614/Put/seqid=0 2024-12-03T21:20:27,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741851_1027 (size=7824) 2024-12-03T21:20:27,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741851_1027 (size=7824) 2024-12-03T21:20:27,086 INFO [M:0;101545f66cbd:41221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/441793c2087b424e9848e95ae28e7ca4 2024-12-03T21:20:27,090 INFO [M:0;101545f66cbd:41221 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 441793c2087b424e9848e95ae28e7ca4 2024-12-03T21:20:27,104 DEBUG [M:0;101545f66cbd:41221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/163a846cee9b487db3aa1d3c36774f5d is 69, key is 101545f66cbd,39847,1733260774992/rs:state/1733260775459/Put/seqid=0 
2024-12-03T21:20:27,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741852_1028 (size=5156) 2024-12-03T21:20:27,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741852_1028 (size=5156) 2024-12-03T21:20:27,109 INFO [M:0;101545f66cbd:41221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/163a846cee9b487db3aa1d3c36774f5d 2024-12-03T21:20:27,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:20:27,112 INFO [RS:0;101545f66cbd:39847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:20:27,112 INFO [RS:0;101545f66cbd:39847 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,39847,1733260774992; zookeeper connection closed. 2024-12-03T21:20:27,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x1019e5a3f8c0001, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:20:27,113 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@656f6e2d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@656f6e2d 2024-12-03T21:20:27,113 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:20:27,127 DEBUG [M:0;101545f66cbd:41221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dac850bfbe0645f386c29d65d1377450 is 52, key is load_balancer_on/state:d/1733260776230/Put/seqid=0 2024-12-03T21:20:27,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741853_1029 (size=5056) 2024-12-03T21:20:27,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741853_1029 (size=5056) 2024-12-03T21:20:27,132 INFO [M:0;101545f66cbd:41221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dac850bfbe0645f386c29d65d1377450 2024-12-03T21:20:27,137 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b22e9fad9a244ca9aa1b647ae677ffc9 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b22e9fad9a244ca9aa1b647ae677ffc9 2024-12-03T21:20:27,142 INFO [M:0;101545f66cbd:41221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b22e9fad9a244ca9aa1b647ae677ffc9, entries=8, sequenceid=121, filesize=5.5 K 2024-12-03T21:20:27,144 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/441793c2087b424e9848e95ae28e7ca4 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/441793c2087b424e9848e95ae28e7ca4 2024-12-03T21:20:27,149 INFO [M:0;101545f66cbd:41221 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 441793c2087b424e9848e95ae28e7ca4 2024-12-03T21:20:27,149 INFO [M:0;101545f66cbd:41221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/441793c2087b424e9848e95ae28e7ca4, entries=14, sequenceid=121, filesize=7.6 K 2024-12-03T21:20:27,150 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/163a846cee9b487db3aa1d3c36774f5d as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/163a846cee9b487db3aa1d3c36774f5d 2024-12-03T21:20:27,156 INFO [M:0;101545f66cbd:41221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/163a846cee9b487db3aa1d3c36774f5d, entries=1, sequenceid=121, filesize=5.0 K 2024-12-03T21:20:27,157 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dac850bfbe0645f386c29d65d1377450 as hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dac850bfbe0645f386c29d65d1377450 2024-12-03T21:20:27,162 INFO [M:0;101545f66cbd:41221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38247/user/jenkins/test-data/d4679b23-6071-1752-3389-b47d1348385c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dac850bfbe0645f386c29d65d1377450, entries=1, sequenceid=121, filesize=4.9 K 2024-12-03T21:20:27,163 INFO [M:0;101545f66cbd:41221 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false 2024-12-03T21:20:27,165 INFO [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:20:27,165 DEBUG [M:0;101545f66cbd:41221 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260827040Disabling compacts and flushes for region at 1733260827040Disabling writes for close at 1733260827040Obtaining lock to block concurrent updates at 1733260827041 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260827041Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44647, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1733260827041Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260827042 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260827042Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260827058 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260827058Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260827068 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260827081 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260827081Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260827090 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260827104 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260827104Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260827114 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260827127 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260827127Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@428bf6d8: reopening flushed file at 1733260827136 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b580bee: reopening flushed file at 1733260827143 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33dd4ab4: reopening flushed file at 1733260827149 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73aed32e: reopening flushed file at 1733260827156 (+7 ms)Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false at 1733260827163 (+7 ms)Writing region close event to WAL at 1733260827165 (+2 ms)Closed at 1733260827165 2024-12-03T21:20:27,165 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:27,166 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:27,166 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:27,166 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:27,166 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:20:27,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741830_1006 (size=53044) 2024-12-03T21:20:27,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36839 is added to blk_1073741830_1006 (size=53044) 2024-12-03T21:20:27,170 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:20:27,170 INFO [M:0;101545f66cbd:41221 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-03T21:20:27,170 INFO [M:0;101545f66cbd:41221 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41221
2024-12-03T21:20:27,170 INFO [M:0;101545f66cbd:41221 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T21:20:27,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T21:20:27,279 INFO [M:0;101545f66cbd:41221 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T21:20:27,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41221-0x1019e5a3f8c0000, quorum=127.0.0.1:51630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T21:20:27,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38e67c7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T21:20:27,286 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@713ad6b2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T21:20:27,286 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T21:20:27,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ff0a5a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T21:20:27,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64c5d398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,STOPPED}
2024-12-03T21:20:27,289 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T21:20:27,289 WARN [BP-2020687819-172.17.0.2-1733260772815 heartbeating to localhost/127.0.0.1:38247 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T21:20:27,289 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T21:20:27,289 WARN [BP-2020687819-172.17.0.2-1733260772815 heartbeating to localhost/127.0.0.1:38247 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2020687819-172.17.0.2-1733260772815 (Datanode Uuid 9f346f05-c05d-4ef9-8b40-2d2af7ade5ce) service to localhost/127.0.0.1:38247
2024-12-03T21:20:27,289 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data3/current/BP-2020687819-172.17.0.2-1733260772815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T21:20:27,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data4/current/BP-2020687819-172.17.0.2-1733260772815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T21:20:27,290 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T21:20:27,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58258a79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T21:20:27,293 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50c8efd9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T21:20:27,293 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T21:20:27,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b82a74b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T21:20:27,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25b87dc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,STOPPED}
2024-12-03T21:20:27,295 WARN [BP-2020687819-172.17.0.2-1733260772815 heartbeating to localhost/127.0.0.1:38247 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T21:20:27,295 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T21:20:27,295 WARN [BP-2020687819-172.17.0.2-1733260772815 heartbeating to localhost/127.0.0.1:38247 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2020687819-172.17.0.2-1733260772815 (Datanode Uuid 4df981b3-fd43-4671-be13-71814bf6ba74) service to localhost/127.0.0.1:38247 2024-12-03T21:20:27,295 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:20:27,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data1/current/BP-2020687819-172.17.0.2-1733260772815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:20:27,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/cluster_8d784e81-dc09-76a6-88ec-43b8caec4c60/data/data2/current/BP-2020687819-172.17.0.2-1733260772815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:20:27,296 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:20:27,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@153337d2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:20:27,303 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58d2b6e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:20:27,303 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:20:27,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fb6de9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:20:27,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b50defd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir/,STOPPED} 2024-12-03T21:20:27,309 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:20:27,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:20:27,334 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38247 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38247 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38247 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38247 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38247 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38247 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38247 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38247 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38247 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=146 (was 125) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3039 (was 1953) - AvailableMemoryMB LEAK? - 2024-12-03T21:20:27,341 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=146, ProcessCount=11, AvailableMemoryMB=3038 2024-12-03T21:20:27,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.log.dir so I do NOT create it in target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/42846f72-f93d-1dda-a38b-5741255ed74a/hadoop.tmp.dir so I do NOT create it in target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525, deleteOnExit=true 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/test.cache.data in system properties and HBase conf 2024-12-03T21:20:27,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T21:20:27,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:20:27,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:20:27,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:20:27,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:20:27,343 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T21:20:27,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:20:27,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:20:27,345 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:20:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:20:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:20:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:20:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:20:27,361 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:20:27,485 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:20:27,609 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:20:27,612 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:20:27,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:20:27,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:20:27,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:20:27,614 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:20:27,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55e9d4da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:20:27,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40e58970{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:20:27,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fd0bcc5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/java.io.tmpdir/jetty-localhost-38319-hadoop-hdfs-3_4_1-tests_jar-_-any-14231864197884233378/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:20:27,703 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30591dbb{HTTP/1.1, (http/1.1)}{localhost:38319} 2024-12-03T21:20:27,703 INFO [Time-limited test {}] server.Server(415): Started @240800ms 2024-12-03T21:20:27,713 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:20:27,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:27,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:27,904 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:20:27,907 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:20:27,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:20:27,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:20:27,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:20:27,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c8f3fae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:20:27,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@148b6f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:20:27,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4aa2d315{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/java.io.tmpdir/jetty-localhost-43143-hadoop-hdfs-3_4_1-tests_jar-_-any-14763990406873901531/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:20:27,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2079ccf2{HTTP/1.1, (http/1.1)}{localhost:43143} 2024-12-03T21:20:27,998 INFO [Time-limited test {}] server.Server(415): Started @241096ms 2024-12-03T21:20:27,999 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:20:28,022 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:20:28,024 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:20:28,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:20:28,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:20:28,025 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:20:28,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec1477d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:20:28,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78cfb61d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:20:28,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29b0734d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/java.io.tmpdir/jetty-localhost-43367-hadoop-hdfs-3_4_1-tests_jar-_-any-4501698007732118445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:20:28,115 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ea981d3{HTTP/1.1, (http/1.1)}{localhost:43367} 2024-12-03T21:20:28,115 INFO [Time-limited test {}] server.Server(415): Started @241213ms 2024-12-03T21:20:28,116 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:20:28,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:28,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:29,061 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data1/current/BP-1637139593-172.17.0.2-1733260827364/current, will proceed with Du for space computation calculation, 2024-12-03T21:20:29,061 WARN [Thread-1953 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data2/current/BP-1637139593-172.17.0.2-1733260827364/current, will proceed with Du for space computation calculation, 2024-12-03T21:20:29,079 WARN [Thread-1916 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:20:29,081 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa50964ae2ca7c56f with lease ID 0xc2af4949b6814898: Processing first storage report for DS-c6c935f0-ec2a-4f19-8508-6797fa27b57c from datanode DatanodeRegistration(127.0.0.1:33457, datanodeUuid=6d8216db-721c-4cc5-9911-7ed327a974cf, infoPort=46439, infoSecurePort=0, ipcPort=41037, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364) 2024-12-03T21:20:29,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa50964ae2ca7c56f with lease ID 0xc2af4949b6814898: from storage DS-c6c935f0-ec2a-4f19-8508-6797fa27b57c node DatanodeRegistration(127.0.0.1:33457, datanodeUuid=6d8216db-721c-4cc5-9911-7ed327a974cf, infoPort=46439, infoSecurePort=0, ipcPort=41037, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:20:29,081 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa50964ae2ca7c56f with lease ID 0xc2af4949b6814898: Processing first storage report for DS-b89e8e09-789e-4915-9b20-8118c5ee8e93 from datanode DatanodeRegistration(127.0.0.1:33457, datanodeUuid=6d8216db-721c-4cc5-9911-7ed327a974cf, infoPort=46439, infoSecurePort=0, ipcPort=41037, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364) 2024-12-03T21:20:29,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa50964ae2ca7c56f with lease ID 0xc2af4949b6814898: from storage DS-b89e8e09-789e-4915-9b20-8118c5ee8e93 node DatanodeRegistration(127.0.0.1:33457, datanodeUuid=6d8216db-721c-4cc5-9911-7ed327a974cf, infoPort=46439, infoSecurePort=0, ipcPort=41037, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:20:29,178 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data3/current/BP-1637139593-172.17.0.2-1733260827364/current, will proceed with Du for space computation calculation, 2024-12-03T21:20:29,179 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data4/current/BP-1637139593-172.17.0.2-1733260827364/current, will proceed with Du for space computation calculation, 2024-12-03T21:20:29,196 WARN [Thread-1939 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:20:29,198 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7697f8ee16a3d358 with lease ID 0xc2af4949b6814899: Processing first storage report for DS-de72b144-b541-4a42-8e84-0d59ea943892 from datanode DatanodeRegistration(127.0.0.1:33265, datanodeUuid=04cfc471-2499-4a0a-bbaa-5aabf7de1124, infoPort=34459, infoSecurePort=0, ipcPort=44285, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364) 2024-12-03T21:20:29,198 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7697f8ee16a3d358 with lease ID 0xc2af4949b6814899: from storage DS-de72b144-b541-4a42-8e84-0d59ea943892 node DatanodeRegistration(127.0.0.1:33265, datanodeUuid=04cfc471-2499-4a0a-bbaa-5aabf7de1124, infoPort=34459, infoSecurePort=0, ipcPort=44285, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:20:29,198 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7697f8ee16a3d358 with lease ID 0xc2af4949b6814899: Processing first storage report for DS-0ec26213-e1c2-4653-a306-0642402b0953 from datanode DatanodeRegistration(127.0.0.1:33265, datanodeUuid=04cfc471-2499-4a0a-bbaa-5aabf7de1124, infoPort=34459, infoSecurePort=0, ipcPort=44285, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364) 2024-12-03T21:20:29,198 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7697f8ee16a3d358 with lease ID 0xc2af4949b6814899: from storage DS-0ec26213-e1c2-4653-a306-0642402b0953 node DatanodeRegistration(127.0.0.1:33265, datanodeUuid=04cfc471-2499-4a0a-bbaa-5aabf7de1124, infoPort=34459, infoSecurePort=0, ipcPort=44285, storageInfo=lv=-57;cid=testClusterID;nsid=799241911;c=1733260827364), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:20:29,246 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095 2024-12-03T21:20:29,250 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/zookeeper_0, clientPort=59875, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:20:29,251 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59875 2024-12-03T21:20:29,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,254 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:20:29,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:20:29,267 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b with version=8 2024-12-03T21:20:29,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:20:29,270 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:20:29,270 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:20:29,271 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36423 2024-12-03T21:20:29,272 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36423 connecting to ZooKeeper ensemble=127.0.0.1:59875 2024-12-03T21:20:29,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364230x0, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-03T21:20:29,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36423-0x1019e5b14290000 connected 2024-12-03T21:20:29,421 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,430 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:20:29,430 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b, hbase.cluster.distributed=false 2024-12-03T21:20:29,432 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:20:29,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36423 2024-12-03T21:20:29,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36423 2024-12-03T21:20:29,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36423 2024-12-03T21:20:29,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36423 2024-12-03T21:20:29,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36423 2024-12-03T21:20:29,446 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:20:29,446 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:20:29,446 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:20:29,447 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46815 2024-12-03T21:20:29,448 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46815 connecting to ZooKeeper ensemble=127.0.0.1:59875 2024-12-03T21:20:29,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468150x0, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:20:29,463 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468150x0, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:20:29,463 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46815-0x1019e5b14290001 connected 2024-12-03T21:20:29,463 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:20:29,463 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:20:29,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:20:29,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:20:29,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T21:20:29,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46815 2024-12-03T21:20:29,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46815 2024-12-03T21:20:29,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T21:20:29,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T21:20:29,481 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:36423 2024-12-03T21:20:29,482 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:20:29,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:20:29,493 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:20:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,504 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:20:29,504 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,36423,1733260829269 from backup master directory 2024-12-03T21:20:29,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:20:29,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:20:29,512 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
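The ZKWatcher/ZKUtil entries above repeatedly report "Set watcher on znode that does not yet exist" (for /hbase/running, /hbase/master, /hbase/acl) followed by NodeCreated/NodeChildrenChanged events once the active master registers. A minimal sketch of that idiom with the plain Apache ZooKeeper client; the ensemble address and znode path are taken from the log, while the class name and printouts are illustrative only, not HBase's own code:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Illustrative sketch: arm a watcher on a znode that may not exist yet.
// ZooKeeper.exists() returns null for a missing node but still registers the
// watcher, so a later NodeCreated event (e.g. /hbase/master appearing) fires it.
public class WatchMissingZnode {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59875", 30000, (WatchedEvent event) -> {
      // Session-level events (type=None, state=SyncConnected) arrive here,
      // mirroring the "Received ZooKeeper Event, type=None" lines in the log.
      System.out.println("session event: " + event);
    });

    // One-shot data watcher; returns null while /hbase/master is absent.
    Stat stat = zk.exists("/hbase/master", event ->
        System.out.println("znode event: " + event.getType() + " on " + event.getPath()));
    System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));
  }
}

Because ZooKeeper watches are one-shot, code following this pattern re-registers the watch after each notification, which is why the log shows the watcher being set again once the znode exists.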
2024-12-03T21:20:29,512 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,517 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/hbase.id] with ID: a678b682-2f20-4f62-b1bd-a185a4614015 2024-12-03T21:20:29,517 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/.tmp/hbase.id 2024-12-03T21:20:29,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:20:29,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:20:29,523 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/.tmp/hbase.id]:[hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/hbase.id] 2024-12-03T21:20:29,533 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:29,533 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:20:29,534 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
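The cluster ID bootstrap just above is a write-then-rename publication: hbase.id is first written under a .tmp directory and only then moved to its final name, so a crash never leaves a half-written file at the published path. A rough sketch of that pattern with the stock Hadoop FileSystem API; the NameNode URI, root directory and UUID echo the log, while the PublishClusterId helper itself is hypothetical:

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: publish a small metadata file by writing it to a temp
// path and renaming it into place, so readers never observe a partial file.
public final class PublishClusterId {

  static void publish(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale temp file
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                           // move into the published location
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    // NameNode address and paths taken from the log; adjust for a real cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43303"), new Configuration());
    publish(fs, new Path("/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b"),
        "a678b682-2f20-4f62-b1bd-a185a4614015");
  }
}

Within a single HDFS namespace the rename is a metadata-only operation, which is what makes it a reasonable stand-in for an atomic publish here.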
2024-12-03T21:20:29,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:20:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:20:29,552 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:20:29,552 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:20:29,552 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:20:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:20:29,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:20:29,560 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store 2024-12-03T21:20:29,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:20:29,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:20:29,580 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:20:29,580 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
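The MasterRegion/HRegion lines above print the full table descriptor for master:store (families info, proc, rs and state with their versions, encodings, bloom filters and block sizes). As a rough guide to how such a descriptor is assembled, here is a sketch using the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API, building only an 'info'-like family; the table name is made up for illustration, and this is not the internal code path that creates master:store.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Mirrors the attributes printed for the 'info' family above:
// VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL,
// IN_MEMORY=true, BLOCKSIZE=8192.
public class StoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_store"))   // hypothetical table name
        .setColumnFamily(info)
        .build();

    System.out.println(td);   // printed in the same key => value attribute style as the log
  }
}
```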
2024-12-03T21:20:29,580 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260829580Disabling compacts and flushes for region at 1733260829580Disabling writes for close at 1733260829580Writing region close event to WAL at 1733260829580Closed at 1733260829580 2024-12-03T21:20:29,583 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/.initializing 2024-12-03T21:20:29,583 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/WALs/101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,586 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C36423%2C1733260829269, suffix=, logDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/WALs/101545f66cbd,36423,1733260829269, archiveDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/oldWALs, maxLogs=10 2024-12-03T21:20:29,586 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C36423%2C1733260829269.1733260829586 2024-12-03T21:20:29,596 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/WALs/101545f66cbd,36423,1733260829269/101545f66cbd%2C36423%2C1733260829269.1733260829586 2024-12-03T21:20:29,596 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46439:46439),(127.0.0.1/127.0.0.1:34459:34459)] 2024-12-03T21:20:29,599 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:20:29,599 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:29,599 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,600 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:20:29,603 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:29,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:20:29,605 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:29,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:20:29,607 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:29,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:20:29,608 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:29,609 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,609 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,610 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,611 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,611 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,611 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:20:29,612 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:20:29,614 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:20:29,614 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835894, jitterRate=0.06289508938789368}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:20:29,615 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260829600Initializing all the Stores at 1733260829600Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260829600Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260829601 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260829601Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260829601Cleaning up temporary data from old regions at 1733260829611 (+10 ms)Region opened successfully at 1733260829615 (+4 ms) 2024-12-03T21:20:29,615 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:20:29,618 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c5bb135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:20:29,618 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:20:29,619 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:20:29,619 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:20:29,619 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:20:29,619 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:20:29,620 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:20:29,620 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:20:29,622 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:20:29,623 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:20:29,629 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:20:29,629 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:20:29,630 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:20:29,637 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:20:29,637 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:20:29,639 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:20:29,645 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:20:29,646 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:20:29,654 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:20:29,655 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:20:29,662 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:20:29,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:20:29,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:20:29,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,671 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,36423,1733260829269, sessionid=0x1019e5b14290000, setting cluster-up flag (Was=false) 2024-12-03T21:20:29,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,712 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:20:29,713 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:20:29,754 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:20:29,755 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,36423,1733260829269 2024-12-03T21:20:29,756 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:20:29,758 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:20:29,758 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:20:29,758 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:20:29,758 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,36423,1733260829269 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:20:29,760 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:20:29,763 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:20:29,763 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:20:29,764 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,764 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:20:29,767 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260859767 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:20:29,768 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:20:29,771 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,772 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(746): ClusterId : a678b682-2f20-4f62-b1bd-a185a4614015 2024-12-03T21:20:29,772 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:20:29,772 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:20:29,772 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:20:29,772 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:20:29,773 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:20:29,773 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:20:29,774 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260829773,5,FailOnTimeoutGroup] 2024-12-03T21:20:29,777 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260829774,5,FailOnTimeoutGroup] 2024-12-03T21:20:29,777 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,777 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:20:29,777 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,777 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
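The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above come from ChoreService scheduling periodic maintenance tasks (log cleaner, HFile cleaner, snapshot cleaner and so on). A minimal chore, assuming the public ScheduledChore/ChoreService/Stoppable types and using an invented name and period, might look like this:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class CleanerChoreSketch {
  public static void main(String[] args) throws Exception {
    // Stoppable lets the chore framework ask whether its owner has shut down.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Hypothetical cleaner; the real ones walk oldWALs/archive directories
    // and delete files that have outlived their TTL.
    ScheduledChore demoChore = new ScheduledChore("DemoCleaner", stopper, 600_000) {
      @Override protected void chore() {
        System.out.println("DemoCleaner tick");
      }
    };

    ChoreService choreService = new ChoreService("demo");
    choreService.scheduleChore(demoChore);   // the source of the "... is enabled." log line
    Thread.sleep(1_000);
    choreService.shutdown();
  }
}
```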
2024-12-03T21:20:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:20:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:20:29,779 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:20:29,779 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:20:29,779 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:20:29,779 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b 2024-12-03T21:20:29,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:20:29,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:20:29,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:29,788 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:20:29,788 DEBUG [RS:0;101545f66cbd:46815 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69143b49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:20:29,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:20:29,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:20:29,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:29,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:20:29,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:20:29,792 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:29,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:20:29,793 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:20:29,793 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:29,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:20:29,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:20:29,795 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:29,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:29,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:20:29,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740 2024-12-03T21:20:29,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740 2024-12-03T21:20:29,797 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:20:29,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:20:29,798 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:20:29,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:20:29,801 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:20:29,801 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812971, jitterRate=0.033746108412742615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:20:29,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260829787Initializing all the Stores at 1733260829788 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260829788Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260829788Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260829788Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260829788Cleaning up temporary data from old regions at 1733260829797 (+9 ms)Region opened successfully at 1733260829802 (+5 ms) 2024-12-03T21:20:29,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:20:29,802 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:20:29,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:20:29,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:20:29,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): 
Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:20:29,802 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:46815 2024-12-03T21:20:29,802 INFO [RS:0;101545f66cbd:46815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:20:29,802 INFO [RS:0;101545f66cbd:46815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:20:29,802 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T21:20:29,803 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:20:29,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260829802Disabling compacts and flushes for region at 1733260829802Disabling writes for close at 1733260829802Writing region close event to WAL at 1733260829803 (+1 ms)Closed at 1733260829803 2024-12-03T21:20:29,803 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,36423,1733260829269 with port=46815, startcode=1733260829446 2024-12-03T21:20:29,804 DEBUG [RS:0;101545f66cbd:46815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:20:29,804 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:20:29,804 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:20:29,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:20:29,805 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:20:29,806 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:20:29,808 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44979, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:20:29,808 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,809 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,810 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b 2024-12-03T21:20:29,810 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43303 
2024-12-03T21:20:29,810 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:20:29,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:20:29,821 DEBUG [RS:0;101545f66cbd:46815 {}] zookeeper.ZKUtil(111): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,821 WARN [RS:0;101545f66cbd:46815 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:20:29,821 INFO [RS:0;101545f66cbd:46815 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:20:29,821 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,821 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,46815,1733260829446] 2024-12-03T21:20:29,824 INFO [RS:0;101545f66cbd:46815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:20:29,826 INFO [RS:0;101545f66cbd:46815 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:20:29,826 INFO [RS:0;101545f66cbd:46815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:20:29,826 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,826 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:20:29,827 INFO [RS:0;101545f66cbd:46815 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:20:29,827 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
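The "Config from master" lines above (hbase.rootdir, fs.defaultFS, hbase.master.info.port) are plain Configuration keys handed to the region server at registration. The snippet below merely sets and reads them; the values are copied from the log and are only meaningful inside this test's mini cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ConfigFromMasterSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.rootdir",
        "hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b");
    conf.set("fs.defaultFS", "hdfs://localhost:43303");
    conf.setInt("hbase.master.info.port", -1);   // -1 disables the master info web UI

    System.out.println(conf.get("hbase.rootdir"));
  }
}
```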
2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:20:29,827 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:20:29,828 DEBUG [RS:0;101545f66cbd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,828 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46815,1733260829446-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:20:29,844 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:20:29,844 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46815,1733260829446-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,844 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,844 INFO [RS:0;101545f66cbd:46815 {}] regionserver.Replication(171): 101545f66cbd,46815,1733260829446 started 2024-12-03T21:20:29,854 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:29,854 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,46815,1733260829446, RpcServer on 101545f66cbd/172.17.0.2:46815, sessionid=0x1019e5b14290001 2024-12-03T21:20:29,855 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:20:29,855 DEBUG [RS:0;101545f66cbd:46815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,855 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,46815,1733260829446' 2024-12-03T21:20:29,855 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:20:29,855 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,46815,1733260829446 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,46815,1733260829446' 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:20:29,856 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:20:29,857 DEBUG [RS:0;101545f66cbd:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:20:29,857 INFO [RS:0;101545f66cbd:46815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:20:29,857 INFO [RS:0;101545f66cbd:46815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager.
2024-12-03T21:20:29,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T21:20:29,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T21:20:29,956 WARN [101545f66cbd:36423 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-03T21:20:29,960 INFO [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C46815%2C1733260829446, suffix=, logDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446, archiveDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/oldWALs, maxLogs=32
2024-12-03T21:20:29,961 INFO [RS:0;101545f66cbd:46815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C46815%2C1733260829446.1733260829960
2024-12-03T21:20:29,969 INFO [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446/101545f66cbd%2C46815%2C1733260829446.1733260829960
2024-12-03T21:20:29,970 DEBUG [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46439:46439),(127.0.0.1/127.0.0.1:34459:34459)]
2024-12-03T21:20:30,207 DEBUG [101545f66cbd:36423 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T21:20:30,208 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,46815,1733260829446
2024-12-03T21:20:30,211 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,46815,1733260829446, state=OPENING
2024-12-03T21:20:30,220 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T21:20:30,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:20:30,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T21:20:30,230 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed
while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:20:30,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:20:30,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:20:30,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,46815,1733260829446}] 2024-12-03T21:20:30,384 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:20:30,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33617, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:20:30,389 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:20:30,389 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:20:30,390 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C46815%2C1733260829446.meta, suffix=.meta, logDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446, archiveDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/oldWALs, maxLogs=32 2024-12-03T21:20:30,391 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C46815%2C1733260829446.meta.1733260830391.meta 2024-12-03T21:20:30,395 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446/101545f66cbd%2C46815%2C1733260829446.meta.1733260830391.meta 2024-12-03T21:20:30,397 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34459:34459),(127.0.0.1/127.0.0.1:46439:46439)] 2024-12-03T21:20:30,397 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:20:30,397 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:20:30,398 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:20:30,398 INFO 
[RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T21:20:30,398 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:20:30,398 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:30,398 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:20:30,398 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:20:30,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:20:30,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:20:30,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:30,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:20:30,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:20:30,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:30,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:20:30,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:20:30,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:30,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:20:30,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:20:30,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,404 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:20:30,405 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:20:30,405 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740 2024-12-03T21:20:30,406 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740 2024-12-03T21:20:30,407 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:20:30,407 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:20:30,408 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:20:30,409 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:20:30,410 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759314, jitterRate=-0.034482941031455994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:20:30,410 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:20:30,410 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260830398Writing region info on filesystem at 1733260830398Initializing all the Stores at 1733260830399 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260830399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260830399Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING 
=> 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260830399Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260830399Cleaning up temporary data from old regions at 1733260830407 (+8 ms)Running coprocessor post-open hooks at 1733260830410 (+3 ms)Region opened successfully at 1733260830410 2024-12-03T21:20:30,411 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260830384 2024-12-03T21:20:30,413 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:20:30,413 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:20:30,414 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:30,415 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,46815,1733260829446, state=OPEN 2024-12-03T21:20:30,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:20:30,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:20:30,455 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,46815,1733260829446 2024-12-03T21:20:30,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:20:30,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:20:30,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:20:30,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,46815,1733260829446 in 225 msec 2024-12-03T21:20:30,459 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:20:30,459 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 653 msec 2024-12-03T21:20:30,460 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:20:30,460 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:20:30,461 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:20:30,461 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,46815,1733260829446, seqNum=-1] 2024-12-03T21:20:30,461 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:20:30,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:20:30,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 708 msec 2024-12-03T21:20:30,467 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260830467, completionTime=-1 2024-12-03T21:20:30,467 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:20:30,467 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260890469 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733260950469 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:30,469 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:20:30,470 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:36423, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:30,470 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:30,470 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:20:30,471 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.961sec 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:20:30,473 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:20:30,476 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:20:30,476 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:20:30,476 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,36423,1733260829269-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:20:30,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3130bf37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:20:30,574 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,36423,-1 for getting cluster id 2024-12-03T21:20:30,575 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:20:30,577 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a678b682-2f20-4f62-b1bd-a185a4614015' 2024-12-03T21:20:30,577 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:20:30,577 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a678b682-2f20-4f62-b1bd-a185a4614015" 2024-12-03T21:20:30,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754209db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:20:30,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,36423,-1] 2024-12-03T21:20:30,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:20:30,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:20:30,580 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41414, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:20:30,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367e6ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:20:30,581 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:20:30,583 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,46815,1733260829446, seqNum=-1] 2024-12-03T21:20:30,583 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:20:30,585 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:20:30,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,36423,1733260829269 2024-12-03T21:20:30,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:20:30,590 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:20:30,590 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T21:20:30,591 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 101545f66cbd,36423,1733260829269 2024-12-03T21:20:30,591 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f899f81 2024-12-03T21:20:30,591 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T21:20:30,592 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41416, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T21:20:30,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T21:20:30,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-03T21:20:30,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:20:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-03T21:20:30,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T21:20:30,596 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-03T21:20:30,596 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T21:20:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:20:30,602 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741835_1011 (size=381) 2024-12-03T21:20:30,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741835_1011 (size=381) 2024-12-03T21:20:30,604 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8b29fa6633d68e784d80fd25d509e979, NAME => 'TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b 2024-12-03T21:20:30,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741836_1012 (size=64) 2024-12-03T21:20:30,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741836_1012 (size=64) 2024-12-03T21:20:30,611 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:30,611 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8b29fa6633d68e784d80fd25d509e979, disabling compactions & flushes 2024-12-03T21:20:30,611 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:30,611 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:30,611 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. after waiting 0 ms 2024-12-03T21:20:30,612 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:30,612 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
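[Editor's illustrative note, not part of the test output] The descriptor logged above for 'TestLogRolling-testLogRolling' (a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB, no compression) is what a client would typically build through the HBase Admin API before the master runs its CreateTableProcedure. A minimal sketch of such a call follows; the class name and ZooKeeper quorum are placeholders, and error handling is omitted.

// Illustrative sketch only: roughly how a client could create an equivalent table
// via the HBase Java client API. Values mirror the descriptor in the log above;
// the quorum address below is a placeholder, not taken from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestLogRollingTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tableBuilder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"));
      tableBuilder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => 64KB
              .build());
      // Submits the request to the master, which drives it as a CreateTableProcedure.
      admin.createTable(tableBuilder.build());
    }
  }
}

The MAX_FILESIZE and MEMSTORE_FLUSHSIZE warnings a few entries earlier are expected here: the test presumably lowers hbase.hregion.max.filesize and hbase.hregion.memstore.flush.size well below production defaults so that flushes, log rolls, and splits happen quickly during the run.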
2024-12-03T21:20:30,612 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8b29fa6633d68e784d80fd25d509e979: Waiting for close lock at 1733260830611Disabling compacts and flushes for region at 1733260830611Disabling writes for close at 1733260830611Writing region close event to WAL at 1733260830612 (+1 ms)Closed at 1733260830612 2024-12-03T21:20:30,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T21:20:30,613 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733260830613"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260830613"}]},"ts":"1733260830613"} 2024-12-03T21:20:30,616 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-03T21:20:30,617 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T21:20:30,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260830617"}]},"ts":"1733260830617"} 2024-12-03T21:20:30,619 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-03T21:20:30,619 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, ASSIGN}] 2024-12-03T21:20:30,620 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, ASSIGN 2024-12-03T21:20:30,621 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, ASSIGN; state=OFFLINE, location=101545f66cbd,46815,1733260829446; forceNewPlan=false, retain=false 2024-12-03T21:20:30,772 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b29fa6633d68e784d80fd25d509e979, regionState=OPENING, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:30,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, ASSIGN because future has completed 2024-12-03T21:20:30,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b29fa6633d68e784d80fd25d509e979, 
server=101545f66cbd,46815,1733260829446}]
2024-12-03T21:20:30,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T21:20:30,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T21:20:30,937 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.
2024-12-03T21:20:30,938 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8b29fa6633d68e784d80fd25d509e979, NAME => 'TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.', STARTKEY => '', ENDKEY => ''}
2024-12-03T21:20:30,938 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8b29fa6633d68e784d80fd25d509e979
2024-12-03T21:20:30,938 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T21:20:30,938 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8b29fa6633d68e784d80fd25d509e979
2024-12-03T21:20:30,938 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8b29fa6633d68e784d80fd25d509e979
2024-12-03T21:20:30,939 INFO [StoreOpener-8b29fa6633d68e784d80fd25d509e979-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8b29fa6633d68e784d80fd25d509e979
2024-12-03T21:20:30,941 INFO [StoreOpener-8b29fa6633d68e784d80fd25d509e979-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8b29fa6633d68e784d80fd25d509e979 columnFamilyName info 2024-12-03T21:20:30,941 DEBUG [StoreOpener-8b29fa6633d68e784d80fd25d509e979-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:30,941 INFO [StoreOpener-8b29fa6633d68e784d80fd25d509e979-1 {}] regionserver.HStore(327): Store=8b29fa6633d68e784d80fd25d509e979/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:30,941 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,942 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,942 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,943 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,943 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,944 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,946 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:20:30,946 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8b29fa6633d68e784d80fd25d509e979; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748404, jitterRate=-0.04835565388202667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:20:30,946 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:30,947 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8b29fa6633d68e784d80fd25d509e979: Running coprocessor pre-open hook at 
1733260830938Writing region info on filesystem at 1733260830938Initializing all the Stores at 1733260830939 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260830939Cleaning up temporary data from old regions at 1733260830943 (+4 ms)Running coprocessor post-open hooks at 1733260830946 (+3 ms)Region opened successfully at 1733260830947 (+1 ms) 2024-12-03T21:20:30,947 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., pid=6, masterSystemTime=1733260830934 2024-12-03T21:20:30,949 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:30,949 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:30,950 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8b29fa6633d68e784d80fd25d509e979, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:30,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 because future has completed 2024-12-03T21:20:30,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T21:20:30,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 in 172 msec 2024-12-03T21:20:30,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T21:20:30,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, ASSIGN in 337 msec 2024-12-03T21:20:30,960 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T21:20:30,960 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733260830960"}]},"ts":"1733260830960"} 2024-12-03T21:20:30,962 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-03T21:20:30,964 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, 
hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T21:20:30,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 371 msec 2024-12-03T21:20:31,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:31,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:31,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:32,347 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:20:32,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:32,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:32,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:33,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-03T21:20:33,707 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T21:20:33,708 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T21:20:33,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:33,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:34,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:34,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:35,825 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T21:20:35,825 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-03T21:20:35,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:35,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:36,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:36,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:37,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:37,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:38,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:38,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:39,211 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:20:39,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:39,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:39,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:40,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36423 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T21:20:40,665 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-03T21:20:40,665 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-03T21:20:40,669 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-03T21:20:40,669 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
2024-12-03T21:20:40,673 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2] 2024-12-03T21:20:40,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:40,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b29fa6633d68e784d80fd25d509e979 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:20:40,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/ef00cbe8423648cc895055544c016708 is 1080, key is row0001/info:/1733260840675/Put/seqid=0 2024-12-03T21:20:40,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741837_1013 (size=12509) 2024-12-03T21:20:40,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741837_1013 (size=12509) 2024-12-03T21:20:40,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/ef00cbe8423648cc895055544c016708 2024-12-03T21:20:40,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-03T21:20:40,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/ef00cbe8423648cc895055544c016708 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708 2024-12-03T21:20:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36324 deadline: 1733260850857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 2024-12-03T21:20:40,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T21:20:40,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 8b29fa6633d68e784d80fd25d509e979 in 142ms, sequenceid=11, compaction requested=false 2024-12-03T21:20:40,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:40,892 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T21:20:40,893 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T21:20:40,893 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 because the exception is null or not the one we care about 2024-12-03T21:20:40,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:40,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:41,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:41,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:42,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:42,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:43,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:43,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:44,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:44,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:45,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:45,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:46,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:46,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:47,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:47,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:48,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:48,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:49,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:49,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:50,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:50,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:50,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:50,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b29fa6633d68e784d80fd25d509e979 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-03T21:20:50,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/201eb85544d04f6897bbe855d38d1ff2 is 1080, key is row0008/info:/1733260840732/Put/seqid=0 2024-12-03T21:20:50,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741838_1014 (size=29761) 2024-12-03T21:20:50,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741838_1014 (size=29761) 2024-12-03T21:20:50,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/201eb85544d04f6897bbe855d38d1ff2 2024-12-03T21:20:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/201eb85544d04f6897bbe855d38d1ff2 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 2024-12-03T21:20:50,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2, entries=23, sequenceid=37, filesize=29.1 K 2024-12-03T21:20:50,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 8b29fa6633d68e784d80fd25d509e979 in 31ms, sequenceid=37, compaction requested=false 2024-12-03T21:20:50,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:50,997 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-03T21:20:50,998 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:50,998 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 because midkey is the same as first or last row 2024-12-03T21:20:51,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:51,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:52,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:52,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:52,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:52,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b29fa6633d68e784d80fd25d509e979 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:20:52,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/e3b20acb90ea4b4cbdd98866b665dde1 is 1080, key is row0031/info:/1733260850968/Put/seqid=0 2024-12-03T21:20:52,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741839_1015 (size=12509) 2024-12-03T21:20:52,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741839_1015 (size=12509) 2024-12-03T21:20:52,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/e3b20acb90ea4b4cbdd98866b665dde1 2024-12-03T21:20:53,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/e3b20acb90ea4b4cbdd98866b665dde1 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1 2024-12-03T21:20:53,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1, entries=7, sequenceid=47, filesize=12.2 K 2024-12-03T21:20:53,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 8b29fa6633d68e784d80fd25d509e979 in 26ms, sequenceid=47, compaction requested=true 2024-12-03T21:20:53,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:53,012 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,012 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,012 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 because midkey is the same as first or last row 2024-12-03T21:20:53,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b29fa6633d68e784d80fd25d509e979:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-03T21:20:53,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,013 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:20:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b29fa6633d68e784d80fd25d509e979 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T21:20:53,014 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:20:53,014 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): 8b29fa6633d68e784d80fd25d509e979/info is initiating minor compaction (all files) 2024-12-03T21:20:53,014 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8b29fa6633d68e784d80fd25d509e979/info in TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:53,014 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp, totalSize=53.5 K 2024-12-03T21:20:53,015 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef00cbe8423648cc895055544c016708, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733260840675 2024-12-03T21:20:53,015 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 201eb85544d04f6897bbe855d38d1ff2, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733260840732 2024-12-03T21:20:53,016 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3b20acb90ea4b4cbdd98866b665dde1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733260850968 2024-12-03T21:20:53,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/17d3ae3cc0ba4131af11cb90ab3dd03e is 1080, key is row0038/info:/1733260852988/Put/seqid=0 
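
Note on the repeated WARN entries earlier in this stretch: they come from the WAL close path, where RecoverLeaseFSUtils polls the NameNode roughly once per second by calling DistributedFileSystem.isFileClosed through reflection (the stack frames show Method.invoke), and every call throws InvocationTargetException because the wrapped cause is IOException("Filesystem closed"), i.e. the DFSClient behind the filesystem handle was already shut down. A minimal sketch of that polling shape, using only public Hadoop APIs; the class and method names other than isFileClosed are illustrative and not the actual RecoverLeaseFSUtils code:

  import java.lang.reflect.InvocationTargetException;
  import java.lang.reflect.Method;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public final class LeaseRecoveryPollSketch {
    /** Polls isFileClosed(path) via reflection until it returns true or the deadline passes. */
    static boolean waitUntilFileClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      Method isFileClosed;
      try {
        // DistributedFileSystem#isFileClosed(Path) is looked up reflectively so the caller
        // still links against older Hadoop clients that lack the method.
        isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      } catch (NoSuchMethodException e) {
        return false; // old client: caller falls back to plain recoverLease() retries
      }
      while (System.currentTimeMillis() < deadline) {
        try {
          if ((Boolean) isFileClosed.invoke(fs, wal)) {
            return true; // NameNode reports the file closed; lease recovery is done
          }
        } catch (InvocationTargetException | IllegalAccessException e) {
          // The failure seen in the log: the wrapped cause is IOException("Filesystem closed")
          // because the DFSClient behind 'fs' was shut down before the WAL writer finished closing.
        }
        Thread.sleep(1000L); // matches the ~1 s spacing of the WARN lines above
      }
      return false;
    }
  }
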
2024-12-03T21:20:53,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741840_1016 (size=16817) 2024-12-03T21:20:53,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741840_1016 (size=16817) 2024-12-03T21:20:53,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/17d3ae3cc0ba4131af11cb90ab3dd03e 2024-12-03T21:20:53,035 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b29fa6633d68e784d80fd25d509e979#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:20:53,035 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/2109d31fac0a4f63be9fa9a242fc8d06 is 1080, key is row0001/info:/1733260840675/Put/seqid=0 2024-12-03T21:20:53,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/17d3ae3cc0ba4131af11cb90ab3dd03e as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e 2024-12-03T21:20:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741841_1017 (size=44978) 2024-12-03T21:20:53,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e, entries=11, sequenceid=61, filesize=16.4 K 2024-12-03T21:20:53,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741841_1017 (size=44978) 2024-12-03T21:20:53,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 8b29fa6633d68e784d80fd25d509e979 in 38ms, sequenceid=61, compaction requested=false 2024-12-03T21:20:53,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:53,057 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,058 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,058 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 because midkey is the same as first or last row 2024-12-03T21:20:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8b29fa6633d68e784d80fd25d509e979 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-03T21:20:53,073 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/2109d31fac0a4f63be9fa9a242fc8d06 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 2024-12-03T21:20:53,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/3cef70a928e34c21b45c87991716df70 is 1080, key is row0049/info:/1733260853015/Put/seqid=0 2024-12-03T21:20:53,092 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8b29fa6633d68e784d80fd25d509e979/info of 8b29fa6633d68e784d80fd25d509e979 into 2109d31fac0a4f63be9fa9a242fc8d06(size=43.9 K), total size for store is 60.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
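
The flush records around here follow a two-step pattern: the memstore snapshot is first written to an HFile under the region's .tmp directory, and only after the write completes is it moved ("Committing ... as ...") into the store's info directory and registered ("Added ..."). A stripped-down sketch of that commit step using plain Hadoop filesystem calls; the helper name and paths are illustrative, not the HRegionFileSystem implementation:

  import java.io.IOException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public final class FlushCommitSketch {
    /**
     * Moves a freshly written flush file from the region's .tmp area into the live store
     * directory, so readers never observe a partially written HFile.
     */
    static Path commitFlushedFile(FileSystem fs, Path tmpHFile, Path storeDir) throws IOException {
      Path dst = new Path(storeDir, tmpHFile.getName());
      if (!fs.exists(tmpHFile)) {
        throw new IOException("flush output missing: " + tmpHFile);
      }
      // An HDFS rename within one filesystem is atomic, which is what makes the commit safe:
      // the file is either still under .tmp or fully visible in the store directory.
      if (!fs.rename(tmpHFile, dst)) {
        throw new IOException("failed to commit " + tmpHFile + " as " + dst);
      }
      return dst;
    }
  }
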
2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:53,092 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., storeName=8b29fa6633d68e784d80fd25d509e979/info, priority=13, startTime=1733260853012; duration=0sec 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 because midkey is the same as first or last row 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 because midkey is the same as first or last row 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 because midkey is the same as first or last row 2024-12-03T21:20:53,092 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,093 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b29fa6633d68e784d80fd25d509e979:info 2024-12-03T21:20:53,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741842_1018 (size=22222) 2024-12-03T21:20:53,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741842_1018 (size=22222) 2024-12-03T21:20:53,103 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/3cef70a928e34c21b45c87991716df70 2024-12-03T21:20:53,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/3cef70a928e34c21b45c87991716df70 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70 2024-12-03T21:20:53,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70, entries=16, sequenceid=80, filesize=21.7 K 2024-12-03T21:20:53,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for 8b29fa6633d68e784d80fd25d509e979 in 66ms, sequenceid=80, compaction requested=true 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 because midkey is the same as first or last row 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8b29fa6633d68e784d80fd25d509e979:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:20:53,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,128 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:20:53,130 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:20:53,130 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): 8b29fa6633d68e784d80fd25d509e979/info is initiating minor compaction (all files) 2024-12-03T21:20:53,130 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8b29fa6633d68e784d80fd25d509e979/info in TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
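
"Exploring compaction algorithm has selected 3 files of size 84017" is the minor-compaction selection step: from the eligible store files the policy looks for a run of adjacent files in which no file is dramatically larger than the rest (each file's size must be at most the configured ratio times the sum of the other files in the run). A simplified, self-contained version of that ratio predicate, checked against the three file sizes from this compaction; it is not the full ExploringCompactionPolicy, just the core rule:

  import java.util.List;

  final class CompactionRatioSketch {
    /**
     * Returns true if every file in the candidate run is no larger than
     * ratio * (sum of the other files) - the basic "no outlier" rule used
     * when picking files for a minor compaction.
     */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
      long total = fileSizes.stream().mapToLong(Long::longValue).sum();
      for (long size : fileSizes) {
        if (size > ratio * (total - size)) {
          return false;
        }
      }
      return true;
    }

    public static void main(String[] args) {
      // Sizes (bytes) of the three files compacted here: 44978 + 16817 + 22222 = 84017.
      List<Long> run = List.of(44978L, 16817L, 22222L);
      System.out.println(withinRatio(run, 1.2)); // 1.2 = the "ratio 1.200000" from CompactionConfiguration
    }
  }
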
2024-12-03T21:20:53,130 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp, totalSize=82.0 K 2024-12-03T21:20:53,131 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2109d31fac0a4f63be9fa9a242fc8d06, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733260840675 2024-12-03T21:20:53,131 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 17d3ae3cc0ba4131af11cb90ab3dd03e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1733260852988 2024-12-03T21:20:53,132 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cef70a928e34c21b45c87991716df70, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733260853015 2024-12-03T21:20:53,156 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8b29fa6633d68e784d80fd25d509e979#info#compaction#60 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:20:53,156 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/42549edde49946f8aaacc07e1233d23c is 1080, key is row0001/info:/1733260840675/Put/seqid=0 2024-12-03T21:20:53,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741843_1019 (size=74301) 2024-12-03T21:20:53,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741843_1019 (size=74301) 2024-12-03T21:20:53,176 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/.tmp/info/42549edde49946f8aaacc07e1233d23c as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c 2024-12-03T21:20:53,186 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8b29fa6633d68e784d80fd25d509e979/info of 8b29fa6633d68e784d80fd25d509e979 into 42549edde49946f8aaacc07e1233d23c(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:20:53,186 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8b29fa6633d68e784d80fd25d509e979: 2024-12-03T21:20:53,186 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., storeName=8b29fa6633d68e784d80fd25d509e979/info, priority=13, startTime=1733260853128; duration=0sec 2024-12-03T21:20:53,186 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,187 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,187 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,187 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,187 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-12-03T21:20:53,187 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T21:20:53,191 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,191 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,191 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8b29fa6633d68e784d80fd25d509e979:info 2024-12-03T21:20:53,192 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] assignment.AssignmentManager(1363): Split request from 101545f66cbd,46815,1733260829446, parent={ENCODED => 8b29fa6633d68e784d80fd25d509e979, NAME => 'TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-03T21:20:53,198 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,203 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8b29fa6633d68e784d80fd25d509e979, daughterA=db378a0bdb82104b38e97404c19dfca3, daughterB=fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,204 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8b29fa6633d68e784d80fd25d509e979, daughterA=db378a0bdb82104b38e97404c19dfca3, daughterB=fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,204 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8b29fa6633d68e784d80fd25d509e979, daughterA=db378a0bdb82104b38e97404c19dfca3, daughterB=fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,205 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8b29fa6633d68e784d80fd25d509e979, daughterA=db378a0bdb82104b38e97404c19dfca3, daughterB=fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, UNASSIGN}] 2024-12-03T21:20:53,214 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, UNASSIGN 2024-12-03T21:20:53,216 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=8b29fa6633d68e784d80fd25d509e979, regionState=CLOSING, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, UNASSIGN because future has completed 2024-12-03T21:20:53,219 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T21:20:53,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446}] 2024-12-03T21:20:53,375 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,376 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T21:20:53,376 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 8b29fa6633d68e784d80fd25d509e979, disabling compactions & flushes 2024-12-03T21:20:53,376 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:53,376 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 2024-12-03T21:20:53,376 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. after waiting 0 ms 2024-12-03T21:20:53,376 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
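
The split request above carries splitKey=row0062. The region server only gets this far once two checks pass, both visible in the earlier records: the store size exceeds the policy's sizeToCheck (the test's tiny 16.0 K threshold), and the midkey of the largest store file differs from both its first and last row, since splitting at such a midkey would leave one empty daughter ("cannot split ... because midkey is the same as first or last row"). A compact sketch of that decision with plain byte-array row keys; the method names are illustrative:

  import java.util.Arrays;

  final class SplitDecisionSketch {
    /** A split point is usable only if it falls strictly between the file's first and last rows. */
    static byte[] chooseSplitRow(byte[] firstRow, byte[] midRow, byte[] lastRow) {
      if (Arrays.equals(midRow, firstRow) || Arrays.equals(midRow, lastRow)) {
        return null; // "midkey is the same as first or last row" -> cannot split yet
      }
      return midRow;
    }

    /** Size gate before a split is even considered (ConstantSizeRegionSplitPolicy style). */
    static boolean bigEnough(long storeSizeBytes, long sizeToCheckBytes) {
      return storeSizeBytes >= sizeToCheckBytes;
    }

    public static void main(String[] args) {
      // ~82 K of store data against a 16 K check, as in the log; the last row below is
      // illustrative - only row0062 as the chosen midkey comes from the log itself.
      System.out.println(bigEnough(84_017L, 16_384L)); // true
      byte[] split = chooseSplitRow("row0001".getBytes(), "row0062".getBytes(), "row0100".getBytes());
      System.out.println(split == null ? "cannot split" : new String(split)); // row0062
    }
  }
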
2024-12-03T21:20:53,377 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70] to archive 2024-12-03T21:20:53,378 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T21:20:53,380 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/ef00cbe8423648cc895055544c016708 2024-12-03T21:20:53,382 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/201eb85544d04f6897bbe855d38d1ff2 2024-12-03T21:20:53,384 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/2109d31fac0a4f63be9fa9a242fc8d06 2024-12-03T21:20:53,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/e3b20acb90ea4b4cbdd98866b665dde1 2024-12-03T21:20:53,387 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/17d3ae3cc0ba4131af11cb90ab3dd03e 2024-12-03T21:20:53,388 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/3cef70a928e34c21b45c87991716df70 2024-12-03T21:20:53,402 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-12-03T21:20:53,403 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
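
Before the region closes for the split, its now-obsolete store files are not deleted but archived: each one is renamed from data/<namespace>/<table>/<region>/<family>/ to the mirrored path under archive/, so anything still referencing the file (snapshots, delayed readers) can keep resolving it. A minimal sketch of that path mapping and move; the helper name is made up and this assumes the first "/data/" segment in the path is the HBase data root:

  import java.io.IOException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  final class StoreFileArchiveSketch {
    /**
     * Moves .../data/<ns>/<table>/<region>/<family>/<file>
     * to    .../archive/data/<ns>/<table>/<region>/<family>/<file>,
     * preserving the layout so the file stays resolvable after archiving.
     */
    static Path archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
      String full = storeFile.toUri().getPath();
      int idx = full.indexOf("/data/");
      if (idx < 0) {
        throw new IOException("not under a data directory: " + storeFile);
      }
      // Mirror data/... under archive/ beneath the same root directory.
      Path archived = new Path(rootDir, "archive" + full.substring(idx));
      fs.mkdirs(archived.getParent());
      if (!fs.rename(storeFile, archived)) {
        throw new IOException("could not archive " + storeFile + " to " + archived);
      }
      return archived;
    }
  }
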
2024-12-03T21:20:53,403 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 8b29fa6633d68e784d80fd25d509e979: Waiting for close lock at 1733260853376Running coprocessor pre-close hooks at 1733260853376Disabling compacts and flushes for region at 1733260853376Disabling writes for close at 1733260853376Writing region close event to WAL at 1733260853399 (+23 ms)Running coprocessor post-close hooks at 1733260853403 (+4 ms)Closed at 1733260853403 2024-12-03T21:20:53,406 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,406 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=8b29fa6633d68e784d80fd25d509e979, regionState=CLOSED 2024-12-03T21:20:53,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 because future has completed 2024-12-03T21:20:53,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-03T21:20:53,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 8b29fa6633d68e784d80fd25d509e979, server=101545f66cbd,46815,1733260829446 in 190 msec 2024-12-03T21:20:53,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T21:20:53,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8b29fa6633d68e784d80fd25d509e979, UNASSIGN in 198 msec 2024-12-03T21:20:53,435 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:53,439 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=8b29fa6633d68e784d80fd25d509e979, threads=1 2024-12-03T21:20:53,441 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c for region: 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741844_1020 (size=27) 2024-12-03T21:20:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741844_1020 (size=27) 2024-12-03T21:20:53,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741845_1021 (size=27) 2024-12-03T21:20:53,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741845_1021 (size=27) 2024-12-03T21:20:53,479 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c for region: 8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:20:53,481 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 8b29fa6633d68e784d80fd25d509e979 Daughter A: [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979] storefiles, Daughter B: [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979] storefiles. 2024-12-03T21:20:53,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741846_1022 (size=71) 2024-12-03T21:20:53,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741846_1022 (size=71) 2024-12-03T21:20:53,515 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:53,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741847_1023 (size=71) 2024-12-03T21:20:53,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741847_1023 (size=71) 2024-12-03T21:20:53,542 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:53,567 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-12-03T21:20:53,573 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-12-03T21:20:53,578 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733260853577"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733260853577"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733260853577"}]},"ts":"1733260853577"} 2024-12-03T21:20:53,578 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733260853577"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260853577"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733260853577"}]},"ts":"1733260853577"} 
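
The split itself does not rewrite the 72.6 K compacted HFile. Each daughter instead receives a tiny reference file whose name is the parent HFile name with the parent's encoded region name appended, exactly as shown above (42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979); the reference marks whether the daughter reads the half of the parent file below or above the split row, and the data is only physically rewritten when the daughters later compact. A sketch of building and parsing such a reference name; it mirrors the naming visible in the log but is not the HBase Reference class itself:

  final class SplitReferenceSketch {
    enum Range { TOP, BOTTOM }  // which half of the parent HFile the daughter reads

    /** Reference file name: "<parent hfile name>.<parent encoded region name>". */
    static String referenceFileName(String parentHFile, String parentEncodedRegion) {
      return parentHFile + "." + parentEncodedRegion;
    }

    /** Recovers {parent HFile name, parent encoded region} from a reference file name. */
    static String[] parseReference(String referenceName) {
      int dot = referenceName.lastIndexOf('.');
      return new String[] { referenceName.substring(0, dot), referenceName.substring(dot + 1) };
    }

    public static void main(String[] args) {
      // Values taken from the split recorded above.
      String ref = referenceFileName("42549edde49946f8aaacc07e1233d23c",
          "8b29fa6633d68e784d80fd25d509e979");
      System.out.println(ref);
      System.out.println(String.join(" / ", parseReference(ref)));
      // Daughter A (STARTKEY '' .. ENDKEY 'row0062') reads the lower half of the parent file,
      // daughter B (STARTKEY 'row0062') the upper half; nothing is copied at split time.
      System.out.println(Range.BOTTOM + " -> daughterA, " + Range.TOP + " -> daughterB");
    }
  }
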
2024-12-03T21:20:53,578 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733260853577"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733260853577"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733260853577"}]},"ts":"1733260853577"} 2024-12-03T21:20:53,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fed75a742a271d54b31ef1b61aa6e434, ASSIGN}] 2024-12-03T21:20:53,606 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3, ASSIGN 2024-12-03T21:20:53,606 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fed75a742a271d54b31ef1b61aa6e434, ASSIGN 2024-12-03T21:20:53,607 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3, ASSIGN; state=SPLITTING_NEW, location=101545f66cbd,46815,1733260829446; forceNewPlan=false, retain=false 2024-12-03T21:20:53,608 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fed75a742a271d54b31ef1b61aa6e434, ASSIGN; state=SPLITTING_NEW, location=101545f66cbd,46815,1733260829446; forceNewPlan=false, retain=false 2024-12-03T21:20:53,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-03T21:20:53,758 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=db378a0bdb82104b38e97404c19dfca3, regionState=OPENING, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,758 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=fed75a742a271d54b31ef1b61aa6e434, regionState=OPENING, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fed75a742a271d54b31ef1b61aa6e434, ASSIGN because future has completed 2024-12-03T21:20:53,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure fed75a742a271d54b31ef1b61aa6e434, server=101545f66cbd,46815,1733260829446}] 2024-12-03T21:20:53,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3, ASSIGN because future has completed 2024-12-03T21:20:53,763 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure db378a0bdb82104b38e97404c19dfca3, server=101545f66cbd,46815,1733260829446}] 2024-12-03T21:20:53,916 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:20:53,916 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => db378a0bdb82104b38e97404c19dfca3, NAME => 'TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-03T21:20:53,916 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,916 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:53,916 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,916 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,917 INFO [StoreOpener-db378a0bdb82104b38e97404c19dfca3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,918 INFO [StoreOpener-db378a0bdb82104b38e97404c19dfca3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db378a0bdb82104b38e97404c19dfca3 columnFamilyName info 2024-12-03T21:20:53,918 DEBUG 
[StoreOpener-db378a0bdb82104b38e97404c19dfca3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:53,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:53,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:53,929 DEBUG [StoreOpener-db378a0bdb82104b38e97404c19dfca3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-bottom 2024-12-03T21:20:53,930 INFO [StoreOpener-db378a0bdb82104b38e97404c19dfca3-1 {}] regionserver.HStore(327): Store=db378a0bdb82104b38e97404c19dfca3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:53,930 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,930 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,931 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,932 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1048): stopping wal replay for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,932 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,934 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,935 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened db378a0bdb82104b38e97404c19dfca3; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819638, jitterRate=0.042223989963531494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:20:53,935 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:20:53,935 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for db378a0bdb82104b38e97404c19dfca3: Running coprocessor pre-open hook at 1733260853916Writing region info on filesystem at 1733260853916Initializing all the Stores at 1733260853917 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260853917Cleaning up temporary data from old regions at 1733260853932 (+15 ms)Running coprocessor post-open hooks at 1733260853935 (+3 ms)Region opened successfully at 1733260853935 2024-12-03T21:20:53,936 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3., pid=13, masterSystemTime=1733260853913 2024-12-03T21:20:53,936 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store db378a0bdb82104b38e97404c19dfca3:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:20:53,936 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,936 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-03T21:20:53,937 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 
2024-12-03T21:20:53,937 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): db378a0bdb82104b38e97404c19dfca3/info is initiating minor compaction (all files) 2024-12-03T21:20:53,937 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of db378a0bdb82104b38e97404c19dfca3/info in TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:20:53,938 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-bottom] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/.tmp, totalSize=72.6 K 2024-12-03T21:20:53,938 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733260840675 2024-12-03T21:20:53,939 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:20:53,939 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:20:53,939 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 
2024-12-03T21:20:53,939 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => fed75a742a271d54b31ef1b61aa6e434, NAME => 'TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-03T21:20:53,940 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,940 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=db378a0bdb82104b38e97404c19dfca3, regionState=OPEN, openSeqNum=86, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,940 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:20:53,940 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,940 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,942 INFO [StoreOpener-fed75a742a271d54b31ef1b61aa6e434-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,943 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-03T21:20:53,943 INFO [StoreOpener-fed75a742a271d54b31ef1b61aa6e434-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fed75a742a271d54b31ef1b61aa6e434 columnFamilyName info 2024-12-03T21:20:53,943 DEBUG [StoreOpener-fed75a742a271d54b31ef1b61aa6e434-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:20:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-03T21:20:53,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-03T21:20:53,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure db378a0bdb82104b38e97404c19dfca3, server=101545f66cbd,46815,1733260829446 because future has completed 2024-12-03T21:20:53,945 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36423 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=101545f66cbd,46815,1733260829446, table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-03T21:20:53,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-03T21:20:53,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure db378a0bdb82104b38e97404c19dfca3, server=101545f66cbd,46815,1733260829446 in 182 msec 2024-12-03T21:20:53,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db378a0bdb82104b38e97404c19dfca3, ASSIGN in 344 msec 2024-12-03T21:20:53,961 DEBUG [StoreOpener-fed75a742a271d54b31ef1b61aa6e434-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-top 2024-12-03T21:20:53,962 INFO [StoreOpener-fed75a742a271d54b31ef1b61aa6e434-1 {}] regionserver.HStore(327): Store=fed75a742a271d54b31ef1b61aa6e434/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:20:53,962 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,962 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,964 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,964 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,964 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, 
pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,966 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,967 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened fed75a742a271d54b31ef1b61aa6e434; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741177, jitterRate=-0.057544708251953125}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T21:20:53,967 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:20:53,967 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for fed75a742a271d54b31ef1b61aa6e434: Running coprocessor pre-open hook at 1733260853940Writing region info on filesystem at 1733260853940Initializing all the Stores at 1733260853941 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260853941Cleaning up temporary data from old regions at 1733260853964 (+23 ms)Running coprocessor post-open hooks at 1733260853967 (+3 ms)Region opened successfully at 1733260853967 2024-12-03T21:20:53,968 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., pid=12, masterSystemTime=1733260853913 2024-12-03T21:20:53,968 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 2 2024-12-03T21:20:53,968 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:53,968 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-03T21:20:53,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/bcfbb38b9e9445fc9ece8dba677b5ffc is 193, key is TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434./info:regioninfo/1733260853758/Put/seqid=0 2024-12-03T21:20:53,970 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region 
TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:20:53,970 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:20:53,970 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:20:53,970 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-top] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=72.6 K 2024-12-03T21:20:53,971 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] compactions.Compactor(225): Compacting 42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733260840675 2024-12-03T21:20:53,971 DEBUG [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:20:53,971 INFO [RS_OPEN_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:20:53,973 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db378a0bdb82104b38e97404c19dfca3#info#compaction#62 average throughput is 12.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:20:53,973 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=fed75a742a271d54b31ef1b61aa6e434, regionState=OPEN, openSeqNum=86, regionLocation=101545f66cbd,46815,1733260829446 2024-12-03T21:20:53,974 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/.tmp/info/eea95af742a741c79333d8592aea2a23 is 1080, key is row0001/info:/1733260840675/Put/seqid=0 2024-12-03T21:20:53,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure fed75a742a271d54b31ef1b61aa6e434, server=101545f66cbd,46815,1733260829446 because future has completed 2024-12-03T21:20:53,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-03T21:20:53,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure fed75a742a271d54b31ef1b61aa6e434, server=101545f66cbd,46815,1733260829446 in 223 msec 2024-12-03T21:20:53,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-03T21:20:53,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fed75a742a271d54b31ef1b61aa6e434, ASSIGN in 382 msec 2024-12-03T21:20:53,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8b29fa6633d68e784d80fd25d509e979, daughterA=db378a0bdb82104b38e97404c19dfca3, daughterB=fed75a742a271d54b31ef1b61aa6e434 in 791 msec 2024-12-03T21:20:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741849_1025 (size=70862) 2024-12-03T21:20:53,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741849_1025 (size=70862) 2024-12-03T21:20:53,996 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#63 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:20:53,996 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/12b3de28781e407294b0b7a42f575e44 is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:20:53,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741848_1024 (size=9847) 2024-12-03T21:20:53,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741848_1024 (size=9847) 2024-12-03T21:20:53,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/bcfbb38b9e9445fc9ece8dba677b5ffc 2024-12-03T21:20:54,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741850_1026 (size=8260) 2024-12-03T21:20:54,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741850_1026 (size=8260) 2024-12-03T21:20:54,017 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/12b3de28781e407294b0b7a42f575e44 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/12b3de28781e407294b0b7a42f575e44 2024-12-03T21:20:54,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/ns/f02a68b0857249e1a72275ea53437eaf is 43, key is default/ns:d/1733260830463/Put/seqid=0 2024-12-03T21:20:54,024 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-03T21:20:54,025 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 12b3de28781e407294b0b7a42f575e44(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T21:20:54,025 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:20:54,025 INFO [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=15, startTime=1733260853968; duration=0sec 2024-12-03T21:20:54,025 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:54,025 DEBUG [RS:0;101545f66cbd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:20:54,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741851_1027 (size=5153) 2024-12-03T21:20:54,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741851_1027 (size=5153) 2024-12-03T21:20:54,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/ns/f02a68b0857249e1a72275ea53437eaf 2024-12-03T21:20:54,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/table/4117874dcb54441b8a6b11e33ec4594c is 65, key is TestLogRolling-testLogRolling/table:state/1733260830960/Put/seqid=0 2024-12-03T21:20:54,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741852_1028 (size=5340) 2024-12-03T21:20:54,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741852_1028 (size=5340) 2024-12-03T21:20:54,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/table/4117874dcb54441b8a6b11e33ec4594c 2024-12-03T21:20:54,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/bcfbb38b9e9445fc9ece8dba677b5ffc as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/info/bcfbb38b9e9445fc9ece8dba677b5ffc 2024-12-03T21:20:54,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/info/bcfbb38b9e9445fc9ece8dba677b5ffc, entries=30, sequenceid=17, filesize=9.6 K 2024-12-03T21:20:54,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/ns/f02a68b0857249e1a72275ea53437eaf as 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/ns/f02a68b0857249e1a72275ea53437eaf 2024-12-03T21:20:54,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/ns/f02a68b0857249e1a72275ea53437eaf, entries=2, sequenceid=17, filesize=5.0 K 2024-12-03T21:20:54,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/table/4117874dcb54441b8a6b11e33ec4594c as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/table/4117874dcb54441b8a6b11e33ec4594c 2024-12-03T21:20:54,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/table/4117874dcb54441b8a6b11e33ec4594c, entries=2, sequenceid=17, filesize=5.2 K 2024-12-03T21:20:54,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 146ms, sequenceid=17, compaction requested=false 2024-12-03T21:20:54,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T21:20:54,398 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/.tmp/info/eea95af742a741c79333d8592aea2a23 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/eea95af742a741c79333d8592aea2a23 2024-12-03T21:20:54,404 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in db378a0bdb82104b38e97404c19dfca3/info of db378a0bdb82104b38e97404c19dfca3 into eea95af742a741c79333d8592aea2a23(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T21:20:54,404 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for db378a0bdb82104b38e97404c19dfca3: 2024-12-03T21:20:54,404 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3., storeName=db378a0bdb82104b38e97404c19dfca3/info, priority=15, startTime=1733260853936; duration=0sec 2024-12-03T21:20:54,404 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:20:54,404 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db378a0bdb82104b38e97404c19dfca3:info 2024-12-03T21:20:54,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:54,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:55,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36324 deadline: 1733260865065, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. is not online on 101545f66cbd,46815,1733260829446 2024-12-03T21:20:55,066 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. 
is not online on 101545f66cbd,46815,1733260829446 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T21:20:55,066 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979. is not online on 101545f66cbd,46815,1733260829446 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T21:20:55,066 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733260830592.8b29fa6633d68e784d80fd25d509e979., hostname=101545f66cbd,46815,1733260829446, seqNum=2 from cache 2024-12-03T21:20:55,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:55,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:56,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:56,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:57,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:57,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:58,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:58,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:20:58,952 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:20:58,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:58,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:20:59,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T21:20:59,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:20:59,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:00,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:00,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:01,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:01,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:02,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:02,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:03,707 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-03T21:21:03,708 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T21:21:03,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:03,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:04,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:04,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:05,102 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., hostname=101545f66cbd,46815,1733260829446, seqNum=86] 2024-12-03T21:21:05,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:05,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:21:05,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c9a67ff6e2f141f48559f7296e573473 is 1080, key is row0065/info:/1733260865103/Put/seqid=0 2024-12-03T21:21:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741853_1029 (size=12509) 2024-12-03T21:21:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741853_1029 (size=12509) 2024-12-03T21:21:05,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c9a67ff6e2f141f48559f7296e573473 2024-12-03T21:21:05,142 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c9a67ff6e2f141f48559f7296e573473 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473 2024-12-03T21:21:05,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473, entries=7, sequenceid=96, filesize=12.2 K 2024-12-03T21:21:05,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for fed75a742a271d54b31ef1b61aa6e434 in 36ms, sequenceid=96, compaction requested=false 2024-12-03T21:21:05,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:05,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:05,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-03T21:21:05,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f23bfd0eb78f4cca95b229a82087defc is 1080, key is row0072/info:/1733260865116/Put/seqid=0 2024-12-03T21:21:05,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741854_1030 (size=20064) 2024-12-03T21:21:05,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741854_1030 (size=20064) 2024-12-03T21:21:05,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f23bfd0eb78f4cca95b229a82087defc 2024-12-03T21:21:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f23bfd0eb78f4cca95b229a82087defc as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc 2024-12-03T21:21:05,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc, entries=14, sequenceid=113, filesize=19.6 K 2024-12-03T21:21:05,192 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for fed75a742a271d54b31ef1b61aa6e434 in 38ms, sequenceid=113, compaction requested=true 2024-12-03T21:21:05,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:05,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:05,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:05,192 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:05,193 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40833 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:05,193 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:05,193 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:05,193 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/12b3de28781e407294b0b7a42f575e44, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=39.9 K 2024-12-03T21:21:05,193 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12b3de28781e407294b0b7a42f575e44, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733260853052 2024-12-03T21:21:05,194 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9a67ff6e2f141f48559f7296e573473, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733260865103 2024-12-03T21:21:05,194 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting f23bfd0eb78f4cca95b229a82087defc, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733260865116 2024-12-03T21:21:05,206 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
fed75a742a271d54b31ef1b61aa6e434#info#compaction#68 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:05,206 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c6fa659271ce4fedbcc69ec16a154199 is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:05,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741855_1031 (size=31009) 2024-12-03T21:21:05,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741855_1031 (size=31009) 2024-12-03T21:21:05,222 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c6fa659271ce4fedbcc69ec16a154199 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c6fa659271ce4fedbcc69ec16a154199 2024-12-03T21:21:05,228 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into c6fa659271ce4fedbcc69ec16a154199(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:05,228 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:05,228 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260865192; duration=0sec 2024-12-03T21:21:05,228 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:05,228 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:05,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:21:05,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:21:06,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:21:06,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:07,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T21:21:07,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ed013b885f504bd18b9290367ace446e is 1080, key is row0086/info:/1733260865155/Put/seqid=0 2024-12-03T21:21:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741856_1032 (size=17895) 2024-12-03T21:21:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741856_1032 (size=17895) 2024-12-03T21:21:07,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ed013b885f504bd18b9290367ace446e 2024-12-03T21:21:07,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ed013b885f504bd18b9290367ace446e as 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e 2024-12-03T21:21:07,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e, entries=12, sequenceid=129, filesize=17.5 K 2024-12-03T21:21:07,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for fed75a742a271d54b31ef1b61aa6e434 in 26ms, sequenceid=129, compaction requested=false 2024-12-03T21:21:07,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:07,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:07,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T21:21:07,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/d92bc7174bd34333b9c6b944b4544929 is 1080, key is row0098/info:/1733260867189/Put/seqid=0 2024-12-03T21:21:07,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741857_1033 (size=16828) 2024-12-03T21:21:07,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741857_1033 (size=16828) 2024-12-03T21:21:07,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/d92bc7174bd34333b9c6b944b4544929 2024-12-03T21:21:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/d92bc7174bd34333b9c6b944b4544929 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929 2024-12-03T21:21:07,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929, entries=11, sequenceid=143, filesize=16.4 K 2024-12-03T21:21:07,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for fed75a742a271d54b31ef1b61aa6e434 in 21ms, sequenceid=143, compaction requested=true 2024-12-03T21:21:07,232 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:07,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:07,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:07,232 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:07,234 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 65732 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:07,234 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:07,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T21:21:07,234 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:07,234 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c6fa659271ce4fedbcc69ec16a154199, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=64.2 K 2024-12-03T21:21:07,234 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6fa659271ce4fedbcc69ec16a154199, keycount=24, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733260853052 2024-12-03T21:21:07,235 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed013b885f504bd18b9290367ace446e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733260865155 2024-12-03T21:21:07,235 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting d92bc7174bd34333b9c6b944b4544929, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1733260867189 2024-12-03T21:21:07,238 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/4f559c03005a4f07a6b4b4c324661930 is 1080, key is row0109/info:/1733260867213/Put/seqid=0 2024-12-03T21:21:07,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741858_1034 (size=16828) 2024-12-03T21:21:07,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741858_1034 (size=16828) 2024-12-03T21:21:07,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/4f559c03005a4f07a6b4b4c324661930 2024-12-03T21:21:07,246 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#72 average throughput is 48.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:07,247 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/25dced6fbaa940f18244c63096434a2d is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:07,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/4f559c03005a4f07a6b4b4c324661930 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930 2024-12-03T21:21:07,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930, entries=11, sequenceid=157, filesize=16.4 K 2024-12-03T21:21:07,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for fed75a742a271d54b31ef1b61aa6e434 in 27ms, sequenceid=157, compaction requested=false 2024-12-03T21:21:07,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:07,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741859_1035 (size=55934) 2024-12-03T21:21:07,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741859_1035 (size=55934) 2024-12-03T21:21:07,270 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/25dced6fbaa940f18244c63096434a2d as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/25dced6fbaa940f18244c63096434a2d 2024-12-03T21:21:07,276 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 25dced6fbaa940f18244c63096434a2d(size=54.6 K), total size for store is 71.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:07,276 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:07,276 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260867232; duration=0sec 2024-12-03T21:21:07,276 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:07,276 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:07,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:21:07,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T21:21:08,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:09,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-03T21:21:09,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7e56cf78eb7642068dd461b5a5d26509 is 1080, key is row0120/info:/1733260867235/Put/seqid=0 2024-12-03T21:21:09,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741860_1036 (size=15750) 2024-12-03T21:21:09,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741860_1036 (size=15750) 2024-12-03T21:21:09,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7e56cf78eb7642068dd461b5a5d26509 2024-12-03T21:21:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7e56cf78eb7642068dd461b5a5d26509 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509 2024-12-03T21:21:09,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509, entries=10, sequenceid=171, filesize=15.4 K 2024-12-03T21:21:09,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for fed75a742a271d54b31ef1b61aa6e434 in 43ms, sequenceid=171, compaction requested=true 2024-12-03T21:21:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:09,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current 
under compaction store size is 1 2024-12-03T21:21:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:09,306 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:09,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T21:21:09,308 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88512 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:09,308 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:09,308 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:09,308 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/25dced6fbaa940f18244c63096434a2d, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=86.4 K 2024-12-03T21:21:09,308 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25dced6fbaa940f18244c63096434a2d, keycount=47, bloomtype=ROW, size=54.6 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1733260853052 2024-12-03T21:21:09,309 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f559c03005a4f07a6b4b4c324661930, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733260867213 2024-12-03T21:21:09,309 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e56cf78eb7642068dd461b5a5d26509, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733260867235 2024-12-03T21:21:09,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/70b4c099db0f4200a35e97b6deb11728 is 1080, key is row0130/info:/1733260869266/Put/seqid=0 2024-12-03T21:21:09,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to 
blk_1073741861_1037 (size=17906) 2024-12-03T21:21:09,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741861_1037 (size=17906) 2024-12-03T21:21:09,325 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#75 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:09,326 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f310208e3fb7408d909f30bc03048d8e is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:09,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/70b4c099db0f4200a35e97b6deb11728 2024-12-03T21:21:09,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/70b4c099db0f4200a35e97b6deb11728 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728 2024-12-03T21:21:09,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728, entries=12, sequenceid=186, filesize=17.5 K 2024-12-03T21:21:09,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for fed75a742a271d54b31ef1b61aa6e434 in 36ms, sequenceid=186, compaction requested=false 2024-12-03T21:21:09,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:09,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-03T21:21:09,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741862_1038 (size=78811) 2024-12-03T21:21:09,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741862_1038 (size=78811) 2024-12-03T21:21:09,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/fcc6b854dbd44a02969e87b3acf2f4b5 is 1080, key is row0142/info:/1733260869308/Put/seqid=0 2024-12-03T21:21:09,361 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f310208e3fb7408d909f30bc03048d8e as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f310208e3fb7408d909f30bc03048d8e 2024-12-03T21:21:09,368 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into f310208e3fb7408d909f30bc03048d8e(size=77.0 K), total size for store is 94.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:09,368 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:09,368 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260869306; duration=0sec 2024-12-03T21:21:09,368 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:09,368 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741863_1039 (size=23316) 2024-12-03T21:21:09,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741863_1039 (size=23316) 2024-12-03T21:21:09,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/fcc6b854dbd44a02969e87b3acf2f4b5 2024-12-03T21:21:09,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/fcc6b854dbd44a02969e87b3acf2f4b5 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5 2024-12-03T21:21:09,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5, entries=17, sequenceid=206, filesize=22.8 K 2024-12-03T21:21:09,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=2.10 KB/2152 for fed75a742a271d54b31ef1b61aa6e434 in 53ms, sequenceid=206, compaction requested=true 2024-12-03T21:21:09,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:09,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:09,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:09,399 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:09,400 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:09,400 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:09,400 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 
2024-12-03T21:21:09,400 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f310208e3fb7408d909f30bc03048d8e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=117.2 K 2024-12-03T21:21:09,400 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting f310208e3fb7408d909f30bc03048d8e, keycount=68, bloomtype=ROW, size=77.0 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733260853052 2024-12-03T21:21:09,401 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70b4c099db0f4200a35e97b6deb11728, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733260869266 2024-12-03T21:21:09,401 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcc6b854dbd44a02969e87b3acf2f4b5, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733260869308 2024-12-03T21:21:09,413 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#77 average throughput is 33.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:09,414 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/8b3dabff78954d7786827e0df329ea4d is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:09,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741864_1040 (size=110183) 2024-12-03T21:21:09,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741864_1040 (size=110183) 2024-12-03T21:21:09,463 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/8b3dabff78954d7786827e0df329ea4d as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/8b3dabff78954d7786827e0df329ea4d 2024-12-03T21:21:09,476 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 8b3dabff78954d7786827e0df329ea4d(size=107.6 K), total size for store is 107.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:09,476 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:09,476 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260869399; duration=0sec 2024-12-03T21:21:09,476 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:09,476 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:09,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:09,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:10,516 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T21:21:10,516 INFO [master/101545f66cbd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T21:21:10,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:10,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:11,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:21:11,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/eb44eb1b602f499abeb369f700fb58fa is 1080, key is row0159/info:/1733260869346/Put/seqid=0 2024-12-03T21:21:11,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741865_1041 (size=12516) 2024-12-03T21:21:11,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741865_1041 (size=12516) 2024-12-03T21:21:11,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/eb44eb1b602f499abeb369f700fb58fa 2024-12-03T21:21:11,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/eb44eb1b602f499abeb369f700fb58fa as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa 2024-12-03T21:21:11,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa, entries=7, sequenceid=218, filesize=12.2 K 2024-12-03T21:21:11,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for fed75a742a271d54b31ef1b61aa6e434 in 22ms, sequenceid=218, compaction requested=false 2024-12-03T21:21:11,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:11,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T21:21:11,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/daa2dccf93e9406abdd9759e293fd58f is 1080, key is row0166/info:/1733260871367/Put/seqid=0 2024-12-03T21:21:11,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to 
blk_1073741866_1042 (size=16828)
2024-12-03T21:21:11,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741866_1042 (size=16828)
2024-12-03T21:21:11,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/daa2dccf93e9406abdd9759e293fd58f
2024-12-03T21:21:11,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/daa2dccf93e9406abdd9759e293fd58f as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f
2024-12-03T21:21:11,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f, entries=11, sequenceid=232, filesize=16.4 K
2024-12-03T21:21:11,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for fed75a742a271d54b31ef1b61aa6e434 in 21ms, sequenceid=232, compaction requested=true
2024-12-03T21:21:11,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434:
2024-12-03T21:21:11,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T21:21:11,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T21:21:11,410 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T21:21:11,411 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 139527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T21:21:11,411 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files)
2024-12-03T21:21:11,411 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.
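Editor's note: the ExploringCompactionPolicy entry above ("selected 3 files of size 139527 ... with 1 in ratio") refers to a ratio test applied to each candidate set of store files. As a rough, stand-alone illustration of that test only (not the actual HBase implementation, which also enforces min/max file counts, size limits, and off-peak ratios), the sketch below treats a set as "in ratio" when no single file is larger than the combined size of the other files multiplied by a configurable ratio; 1.2 is assumed here as the commonly cited default for hbase.hstore.compaction.ratio, and all class names and sizes are hypothetical.

```java
import java.util.Arrays;
import java.util.List;

/**
 * Illustrative sketch of a ratio-based compaction selection check.
 * Simplified model only; not HBase's ExploringCompactionPolicy.
 */
public class RatioSelectionSketch {

    /**
     * A candidate set is "in ratio" when every file is no larger than
     * ratio * (total size of the other files in the set).
     */
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true; // a single file is trivially in ratio
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical file sizes in bytes; real selections depend on the configured ratio.
        List<Long> evenFiles = Arrays.asList(12_000L, 15_000L, 18_000L);
        List<Long> skewedFiles = Arrays.asList(110_000L, 12_000L, 16_000L);
        System.out.println("even set in ratio:   " + inRatio(evenFiles, 1.2));   // true
        System.out.println("skewed set in ratio: " + inRatio(skewedFiles, 1.2)); // false
    }
}
```

Whether a size-skewed set like the one compacted in the log passes depends on the ratio and the other policy constraints in effect, which this sketch does not model.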
2024-12-03T21:21:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:11,411 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/8b3dabff78954d7786827e0df329ea4d, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=136.3 K 2024-12-03T21:21:11,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-03T21:21:11,412 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b3dabff78954d7786827e0df329ea4d, keycount=97, bloomtype=ROW, size=107.6 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733260853052 2024-12-03T21:21:11,412 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb44eb1b602f499abeb369f700fb58fa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733260869346 2024-12-03T21:21:11,412 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting daa2dccf93e9406abdd9759e293fd58f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733260871367 2024-12-03T21:21:11,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/5de607ef689f4bab841ecbc3334aaff0 is 1080, key is row0177/info:/1733260871389/Put/seqid=0 2024-12-03T21:21:11,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741867_1043 (size=19000) 2024-12-03T21:21:11,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741867_1043 (size=19000) 2024-12-03T21:21:11,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/5de607ef689f4bab841ecbc3334aaff0 2024-12-03T21:21:11,423 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#81 average throughput is 59.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T21:21:11,423 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f483c7a6b2684045b169a72a19685320 is 1080, key is row0062/info:/1733260853052/Put/seqid=0
2024-12-03T21:21:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/5de607ef689f4bab841ecbc3334aaff0 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0
2024-12-03T21:21:11,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741868_1044 (size=129821)
2024-12-03T21:21:11,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741868_1044 (size=129821)
2024-12-03T21:21:11,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0, entries=13, sequenceid=248, filesize=18.6 K
2024-12-03T21:21:11,432 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/f483c7a6b2684045b169a72a19685320 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f483c7a6b2684045b169a72a19685320
2024-12-03T21:21:11,432 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for fed75a742a271d54b31ef1b61aa6e434 in 21ms, sequenceid=248, compaction requested=false
2024-12-03T21:21:11,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434:
2024-12-03T21:21:11,437 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into f483c7a6b2684045b169a72a19685320(size=126.8 K), total size for store is 145.3 K. This selection was in queue for 0sec, and took 0sec to execute.
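Editor's note: the PressureAwareThroughputController line above reports the compaction's average throughput against a 50.00 MB/second limit and how long the writer slept to stay under it (0 ms here, since the compaction finished before any pacing was needed). As a minimal sketch of the pacing idea only (HBase's controller additionally adjusts the limit based on flush and compaction pressure), the snippet below sleeps after each written chunk whenever the writer is running ahead of a fixed bytes-per-second cap; the class name and chunk sizes are hypothetical.

```java
/**
 * Minimal pacing sketch: keep cumulative writes under a fixed bytes-per-second cap
 * by sleeping when we are ahead of schedule. Not the HBase throughput controller.
 */
public class ThroughputPacerSketch {

    private final double bytesPerSecond;
    private long startNanos = -1L;
    private long bytesWritten = 0L;
    private long totalSleptMs = 0L;

    ThroughputPacerSketch(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Call after writing each chunk; sleeps if the observed rate exceeds the cap. */
    void control(long chunkBytes) throws InterruptedException {
        if (startNanos < 0) {
            startNanos = System.nanoTime();
        }
        bytesWritten += chunkBytes;
        double expectedSeconds = bytesWritten / bytesPerSecond;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        long sleepMs = (long) ((expectedSeconds - elapsedSeconds) * 1000);
        if (sleepMs > 0) {
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputPacerSketch pacer = new ThroughputPacerSketch(50.0 * 1024 * 1024); // 50 MB/s cap
        for (int i = 0; i < 5; i++) {
            pacer.control(1024 * 1024); // pretend each iteration wrote a 1 MB chunk
        }
        System.out.println("total slept time is " + pacer.totalSleptMs + " ms");
    }
}
```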
2024-12-03T21:21:11,437 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:11,437 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260871410; duration=0sec 2024-12-03T21:21:11,437 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:11,437 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:11,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:11,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:12,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:12,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:13,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:21:13,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/154411d193f64a02b459d365308d3f9f is 1080, key is row0190/info:/1733260871412/Put/seqid=0 2024-12-03T21:21:13,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741869_1045 (size=12519) 2024-12-03T21:21:13,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741869_1045 (size=12519) 2024-12-03T21:21:13,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/154411d193f64a02b459d365308d3f9f 2024-12-03T21:21:13,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/154411d193f64a02b459d365308d3f9f as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f 2024-12-03T21:21:13,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f, entries=7, sequenceid=259, filesize=12.2 K 2024-12-03T21:21:13,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for fed75a742a271d54b31ef1b61aa6e434 in 22ms, sequenceid=259, compaction requested=true 2024-12-03T21:21:13,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:13,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:13,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:13,449 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:13,450 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T21:21:13,450 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161340 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:13,451 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:13,451 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:13,451 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f483c7a6b2684045b169a72a19685320, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=157.6 K 2024-12-03T21:21:13,451 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting f483c7a6b2684045b169a72a19685320, keycount=115, bloomtype=ROW, size=126.8 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733260853052 2024-12-03T21:21:13,452 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5de607ef689f4bab841ecbc3334aaff0, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733260871389 2024-12-03T21:21:13,452 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 154411d193f64a02b459d365308d3f9f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733260871412 2024-12-03T21:21:13,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/035fb8755beb466abd8d00de20b0f26f is 1080, key is row0197/info:/1733260873428/Put/seqid=0 2024-12-03T21:21:13,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741870_1046 (size=16839) 2024-12-03T21:21:13,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741870_1046 (size=16839) 2024-12-03T21:21:13,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=273 (bloomFilter=true), 
to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/035fb8755beb466abd8d00de20b0f26f 2024-12-03T21:21:13,467 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#84 average throughput is 46.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:13,467 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/2fd635d83a5742dbbc6205a0139645ab is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:13,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/035fb8755beb466abd8d00de20b0f26f as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f 2024-12-03T21:21:13,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741871_1047 (size=151559) 2024-12-03T21:21:13,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741871_1047 (size=151559) 2024-12-03T21:21:13,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f, entries=11, sequenceid=273, filesize=16.4 K 2024-12-03T21:21:13,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for fed75a742a271d54b31ef1b61aa6e434 in 30ms, sequenceid=273, compaction requested=false 2024-12-03T21:21:13,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:13,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:13,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-03T21:21:13,482 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/2fd635d83a5742dbbc6205a0139645ab as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2fd635d83a5742dbbc6205a0139645ab 2024-12-03T21:21:13,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7158aca93b5b49ee8e462c4455f67ee8 is 1080, key is row0208/info:/1733260873451/Put/seqid=0 2024-12-03T21:21:13,489 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 2fd635d83a5742dbbc6205a0139645ab(size=148.0 K), total size for store is 164.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:13,489 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:13,489 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260873449; duration=0sec 2024-12-03T21:21:13,489 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:13,489 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:13,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741872_1048 (size=20092) 2024-12-03T21:21:13,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741872_1048 (size=20092) 2024-12-03T21:21:13,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7158aca93b5b49ee8e462c4455f67ee8 2024-12-03T21:21:13,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/7158aca93b5b49ee8e462c4455f67ee8 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8 2024-12-03T21:21:13,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8, entries=14, sequenceid=290, filesize=19.6 K 2024-12-03T21:21:13,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for fed75a742a271d54b31ef1b61aa6e434 in 19ms, sequenceid=290, compaction requested=true 2024-12-03T21:21:13,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:13,501 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:13,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:13,501 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:13,502 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188490 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:13,502 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:13,502 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:13,502 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2fd635d83a5742dbbc6205a0139645ab, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=184.1 K 2024-12-03T21:21:13,502 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2fd635d83a5742dbbc6205a0139645ab, keycount=135, bloomtype=ROW, size=148.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733260853052 2024-12-03T21:21:13,503 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 035fb8755beb466abd8d00de20b0f26f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733260873428 2024-12-03T21:21:13,503 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7158aca93b5b49ee8e462c4455f67ee8, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733260873451 2024-12-03T21:21:13,513 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#86 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:13,513 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/2ac0a60b152d4f03ac0836b34dcde13a is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:13,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741873_1049 (size=178644) 2024-12-03T21:21:13,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741873_1049 (size=178644) 2024-12-03T21:21:13,921 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/2ac0a60b152d4f03ac0836b34dcde13a as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2ac0a60b152d4f03ac0836b34dcde13a 2024-12-03T21:21:13,926 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 2ac0a60b152d4f03ac0836b34dcde13a(size=174.5 K), total size for store is 174.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:13,926 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:13,926 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260873501; duration=0sec 2024-12-03T21:21:13,927 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:13,927 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:13,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:13,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:14,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:14,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:15,398 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-12-03T21:21:15,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:15,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T21:21:15,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ce07050be4694576bc347c4a5665a81e is 1080, key is row0222/info:/1733260873482/Put/seqid=0 2024-12-03T21:21:15,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741874_1050 (size=12523) 2024-12-03T21:21:15,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741874_1050 (size=12523) 2024-12-03T21:21:15,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ce07050be4694576bc347c4a5665a81e 2024-12-03T21:21:15,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/ce07050be4694576bc347c4a5665a81e as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e 2024-12-03T21:21:15,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e, entries=7, sequenceid=302, filesize=12.2 K 2024-12-03T21:21:15,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for fed75a742a271d54b31ef1b61aa6e434 in 36ms, sequenceid=302, compaction requested=false 2024-12-03T21:21:15,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:15,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-03T21:21:15,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 is 1080, key is row0229/info:/1733260875499/Put/seqid=0 2024-12-03T21:21:15,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741875_1051 (size=22254) 2024-12-03T21:21:15,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741875_1051 (size=22254) 2024-12-03T21:21:15,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 2024-12-03T21:21:15,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 2024-12-03T21:21:15,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0, entries=16, sequenceid=321, filesize=21.7 K 2024-12-03T21:21:15,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 
KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for fed75a742a271d54b31ef1b61aa6e434 in 32ms, sequenceid=321, compaction requested=true 2024-12-03T21:21:15,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8855): Flush requested on fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:15,567 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T21:21:15,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fed75a742a271d54b31ef1b61aa6e434:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T21:21:15,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:15,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing fed75a742a271d54b31ef1b61aa6e434 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T21:21:15,568 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 213421 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T21:21:15,568 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1541): fed75a742a271d54b31ef1b61aa6e434/info is initiating minor compaction (all files) 2024-12-03T21:21:15,568 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of fed75a742a271d54b31ef1b61aa6e434/info in TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 
2024-12-03T21:21:15,568 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2ac0a60b152d4f03ac0836b34dcde13a, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0] into tmpdir=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp, totalSize=208.4 K 2024-12-03T21:21:15,569 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2ac0a60b152d4f03ac0836b34dcde13a, keycount=160, bloomtype=ROW, size=174.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733260853052 2024-12-03T21:21:15,569 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce07050be4694576bc347c4a5665a81e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1733260873482 2024-12-03T21:21:15,570 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] compactions.Compactor(225): Compacting c20ee2e9ca1d4c8ebcd094e3c947d5a0, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733260875499 2024-12-03T21:21:15,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c1cf2125cb3f45cdab6054b396d4c4e7 is 1080, key is row0245/info:/1733260875535/Put/seqid=0 2024-12-03T21:21:15,585 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fed75a742a271d54b31ef1b61aa6e434#info#compaction#90 average throughput is 46.95 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T21:21:15,586 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/66c9260fbc4f4ee2a07290e8e56ae2e5 is 1080, key is row0062/info:/1733260853052/Put/seqid=0 2024-12-03T21:21:15,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741876_1052 (size=17918) 2024-12-03T21:21:15,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741876_1052 (size=17918) 2024-12-03T21:21:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741877_1053 (size=203571) 2024-12-03T21:21:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741877_1053 (size=203571) 2024-12-03T21:21:15,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c1cf2125cb3f45cdab6054b396d4c4e7 2024-12-03T21:21:15,597 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/66c9260fbc4f4ee2a07290e8e56ae2e5 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/66c9260fbc4f4ee2a07290e8e56ae2e5 2024-12-03T21:21:15,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/.tmp/info/c1cf2125cb3f45cdab6054b396d4c4e7 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c1cf2125cb3f45cdab6054b396d4c4e7 2024-12-03T21:21:15,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c1cf2125cb3f45cdab6054b396d4c4e7, entries=12, sequenceid=336, filesize=17.5 K 2024-12-03T21:21:15,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for fed75a742a271d54b31ef1b61aa6e434 in 36ms, sequenceid=336, compaction requested=false 2024-12-03T21:21:15,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:15,603 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fed75a742a271d54b31ef1b61aa6e434/info of fed75a742a271d54b31ef1b61aa6e434 into 
66c9260fbc4f4ee2a07290e8e56ae2e5(size=198.8 K), total size for store is 216.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T21:21:15,603 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for fed75a742a271d54b31ef1b61aa6e434: 2024-12-03T21:21:15,603 INFO [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., storeName=fed75a742a271d54b31ef1b61aa6e434/info, priority=13, startTime=1733260875566; duration=0sec 2024-12-03T21:21:15,603 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T21:21:15,603 DEBUG [RS:0;101545f66cbd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fed75a742a271d54b31ef1b61aa6e434:info 2024-12-03T21:21:15,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:15,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:16,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:16,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:17,567 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-03T21:21:17,568 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C46815%2C1733260829446.1733260877568 2024-12-03T21:21:17,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,575 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,575 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446/101545f66cbd%2C46815%2C1733260829446.1733260829960 with entries=316, filesize=309.49 KB; new WAL /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446/101545f66cbd%2C46815%2C1733260829446.1733260877568 2024-12-03T21:21:17,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741833_1009 (size=316925) 2024-12-03T21:21:17,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741833_1009 (size=316925) 2024-12-03T21:21:17,584 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/WALs/101545f66cbd,46815,1733260829446/101545f66cbd%2C46815%2C1733260829446.1733260829960 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/oldWALs/101545f66cbd%2C46815%2C1733260829446.1733260829960 2024-12-03T21:21:17,585 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46439:46439),(127.0.0.1/127.0.0.1:34459:34459)] 2024-12-03T21:21:17,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:21:17,589 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T21:21:17,589 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:17,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:17,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:17,589 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T21:21:17,589 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:21:17,590 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=744538088, stopped=false 2024-12-03T21:21:17,590 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,36423,1733260829269 2024-12-03T21:21:17,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:17,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:17,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:17,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:17,632 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:21:17,633 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:21:17,633 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:17,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:17,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:17,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:17,634 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,46815,1733260829446' ***** 2024-12-03T21:21:17,634 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:21:17,634 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:21:17,634 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:21:17,634 INFO [RS:0;101545f66cbd:46815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:21:17,634 INFO [RS:0;101545f66cbd:46815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(3091): Received CLOSE for fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(3091): Received CLOSE for db378a0bdb82104b38e97404c19dfca3 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,46815,1733260829446 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:21:17,635 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fed75a742a271d54b31ef1b61aa6e434, disabling compactions & flushes 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:46815. 2024-12-03T21:21:17,635 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:17,635 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:17,635 DEBUG [RS:0;101545f66cbd:46815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:17,635 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. after waiting 0 ms 2024-12-03T21:21:17,635 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 
2024-12-03T21:21:17,635 DEBUG [RS:0;101545f66cbd:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:21:17,635 INFO [RS:0;101545f66cbd:46815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:21:17,636 INFO [RS:0;101545f66cbd:46815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T21:21:17,636 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:21:17,638 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-top, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/12b3de28781e407294b0b7a42f575e44, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c6fa659271ce4fedbcc69ec16a154199, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/25dced6fbaa940f18244c63096434a2d, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f310208e3fb7408d909f30bc03048d8e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728, 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/8b3dabff78954d7786827e0df329ea4d, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f483c7a6b2684045b169a72a19685320, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2fd635d83a5742dbbc6205a0139645ab, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2ac0a60b152d4f03ac0836b34dcde13a, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0] to archive 2024-12-03T21:21:17,639 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-03T21:21:17,639 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1325): Online Regions={fed75a742a271d54b31ef1b61aa6e434=TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434., db378a0bdb82104b38e97404c19dfca3=TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T21:21:17,639 DEBUG [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, db378a0bdb82104b38e97404c19dfca3, fed75a742a271d54b31ef1b61aa6e434 2024-12-03T21:21:17,639 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T21:21:17,639 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:21:17,639 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:21:17,639 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:21:17,639 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:21:17,639 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:21:17,640 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-03T21:21:17,641 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:21:17,642 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/12b3de28781e407294b0b7a42f575e44 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/12b3de28781e407294b0b7a42f575e44 2024-12-03T21:21:17,643 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c9a67ff6e2f141f48559f7296e573473 2024-12-03T21:21:17,644 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c6fa659271ce4fedbcc69ec16a154199 to 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c6fa659271ce4fedbcc69ec16a154199 2024-12-03T21:21:17,646 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f23bfd0eb78f4cca95b229a82087defc 2024-12-03T21:21:17,646 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/91e2958a828e4e42b2eb3d77cbf09ad0 is 193, key is TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434./info:regioninfo/1733260853973/Put/seqid=0 2024-12-03T21:21:17,647 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ed013b885f504bd18b9290367ace446e 2024-12-03T21:21:17,648 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/25dced6fbaa940f18244c63096434a2d to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/25dced6fbaa940f18244c63096434a2d 2024-12-03T21:21:17,649 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/d92bc7174bd34333b9c6b944b4544929 2024-12-03T21:21:17,650 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930 to 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/4f559c03005a4f07a6b4b4c324661930 2024-12-03T21:21:17,651 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f310208e3fb7408d909f30bc03048d8e to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f310208e3fb7408d909f30bc03048d8e 2024-12-03T21:21:17,652 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7e56cf78eb7642068dd461b5a5d26509 2024-12-03T21:21:17,654 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/70b4c099db0f4200a35e97b6deb11728 2024-12-03T21:21:17,655 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/8b3dabff78954d7786827e0df329ea4d to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/8b3dabff78954d7786827e0df329ea4d 2024-12-03T21:21:17,656 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/fcc6b854dbd44a02969e87b3acf2f4b5 2024-12-03T21:21:17,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/eb44eb1b602f499abeb369f700fb58fa 2024-12-03T21:21:17,658 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f483c7a6b2684045b169a72a19685320 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/f483c7a6b2684045b169a72a19685320 2024-12-03T21:21:17,660 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/daa2dccf93e9406abdd9759e293fd58f 2024-12-03T21:21:17,661 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/5de607ef689f4bab841ecbc3334aaff0 2024-12-03T21:21:17,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741879_1055 (size=6223) 2024-12-03T21:21:17,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741879_1055 (size=6223) 2024-12-03T21:21:17,662 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/91e2958a828e4e42b2eb3d77cbf09ad0 2024-12-03T21:21:17,663 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2fd635d83a5742dbbc6205a0139645ab to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2fd635d83a5742dbbc6205a0139645ab 2024-12-03T21:21:17,664 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/154411d193f64a02b459d365308d3f9f 2024-12-03T21:21:17,666 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/035fb8755beb466abd8d00de20b0f26f 2024-12-03T21:21:17,668 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2ac0a60b152d4f03ac0836b34dcde13a to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/2ac0a60b152d4f03ac0836b34dcde13a 2024-12-03T21:21:17,668 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/.tmp/info/91e2958a828e4e42b2eb3d77cbf09ad0 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/info/91e2958a828e4e42b2eb3d77cbf09ad0 2024-12-03T21:21:17,669 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/7158aca93b5b49ee8e462c4455f67ee8 2024-12-03T21:21:17,671 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/ce07050be4694576bc347c4a5665a81e 2024-12-03T21:21:17,673 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/info/c20ee2e9ca1d4c8ebcd094e3c947d5a0 2024-12-03T21:21:17,673 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=101545f66cbd:36423 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-03T21:21:17,674 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [12b3de28781e407294b0b7a42f575e44=8260, c9a67ff6e2f141f48559f7296e573473=12509, c6fa659271ce4fedbcc69ec16a154199=31009, f23bfd0eb78f4cca95b229a82087defc=20064, ed013b885f504bd18b9290367ace446e=17895, 25dced6fbaa940f18244c63096434a2d=55934, d92bc7174bd34333b9c6b944b4544929=16828, 4f559c03005a4f07a6b4b4c324661930=16828, f310208e3fb7408d909f30bc03048d8e=78811, 7e56cf78eb7642068dd461b5a5d26509=15750, 70b4c099db0f4200a35e97b6deb11728=17906, 8b3dabff78954d7786827e0df329ea4d=110183, fcc6b854dbd44a02969e87b3acf2f4b5=23316, eb44eb1b602f499abeb369f700fb58fa=12516, f483c7a6b2684045b169a72a19685320=129821, daa2dccf93e9406abdd9759e293fd58f=16828, 5de607ef689f4bab841ecbc3334aaff0=19000, 2fd635d83a5742dbbc6205a0139645ab=151559, 154411d193f64a02b459d365308d3f9f=12519, 035fb8755beb466abd8d00de20b0f26f=16839, 2ac0a60b152d4f03ac0836b34dcde13a=178644, 7158aca93b5b49ee8e462c4455f67ee8=20092, ce07050be4694576bc347c4a5665a81e=12523, c20ee2e9ca1d4c8ebcd094e3c947d5a0=22254] 2024-12-03T21:21:17,675 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/info/91e2958a828e4e42b2eb3d77cbf09ad0, entries=5, sequenceid=21, filesize=6.1 K 2024-12-03T21:21:17,676 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 37ms, sequenceid=21, compaction requested=false 2024-12-03T21:21:17,680 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/fed75a742a271d54b31ef1b61aa6e434/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=85 2024-12-03T21:21:17,681 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 
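The StoreCloser entries above record compacted store files being moved from the region's data directory to the mirrored path under archive/; the StoppedRpcClientException only breaks reporting that archival to the master over RPC (the client was already stopped during shutdown), not the move itself. Below is a minimal sketch of that move pattern using the plain Hadoop FileSystem API with hypothetical paths — it is not HBase's HFileArchiver, just an illustration of the data/ -> archive/ relocation the log shows.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical store file and its mirror location under archive/ (same relative path).
    Path src = new Path("/hbase/data/default/SomeTable/region/info/storefile");
    Path dst = new Path("/hbase/archive/data/default/SomeTable/region/info/storefile");
    fs.mkdirs(dst.getParent());          // make sure the archive directory exists
    // rename() moves the file within the filesystem; no data is copied.
    if (!fs.rename(src, dst)) {
      throw new java.io.IOException("could not archive " + src);
    }
  }
}
```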
2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fed75a742a271d54b31ef1b61aa6e434: Waiting for close lock at 1733260877635Running coprocessor pre-close hooks at 1733260877635Disabling compacts and flushes for region at 1733260877635Disabling writes for close at 1733260877635Writing region close event to WAL at 1733260877675 (+40 ms)Running coprocessor post-close hooks at 1733260877681 (+6 ms)Closed at 1733260877681 2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733260853198.fed75a742a271d54b31ef1b61aa6e434. 2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing db378a0bdb82104b38e97404c19dfca3, disabling compactions & flushes 2024-12-03T21:21:17,681 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. after waiting 0 ms 2024-12-03T21:21:17,681 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:21:17,683 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979->hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/8b29fa6633d68e784d80fd25d509e979/info/42549edde49946f8aaacc07e1233d23c-bottom] to archive 2024-12-03T21:21:17,685 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
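The "Region close journal" lines above pack every close step with its epoch-millisecond timestamp and an optional "(+N ms)" delta into a single string. A small, hypothetical helper (not part of HBase) that pulls the per-step offsets out of such a journal string, using the journal for fed75a742a271d54b31ef1b61aa6e434 quoted above as input:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalTimings {
  // Matches "<step> at <epochMillis>" optionally followed by " (+<delta> ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1733260877635"
        + "Running coprocessor pre-close hooks at 1733260877635"
        + "Disabling compacts and flushes for region at 1733260877635"
        + "Disabling writes for close at 1733260877635"
        + "Writing region close event to WAL at 1733260877675 (+40 ms)"
        + "Running coprocessor post-close hooks at 1733260877681 (+6 ms)"
        + "Closed at 1733260877681";
    Matcher m = STEP.matcher(journal);
    long start = -1;
    while (m.find()) {
      long ts = Long.parseLong(m.group(2));
      if (start < 0) start = ts;
      // Print each step with its offset from the first journal timestamp.
      System.out.printf("%-45s +%d ms%n", m.group(1).trim(), ts - start);
    }
  }
}
```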
2024-12-03T21:21:17,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979 to hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/archive/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/info/42549edde49946f8aaacc07e1233d23c.8b29fa6633d68e784d80fd25d509e979 2024-12-03T21:21:17,687 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-03T21:21:17,688 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-03T21:21:17,688 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:21:17,688 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:21:17,688 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260877639Running coprocessor pre-close hooks at 1733260877639Disabling compacts and flushes for region at 1733260877639Disabling writes for close at 1733260877639Obtaining lock to block concurrent updates at 1733260877640 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733260877640Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=705, getHeapSize=2040, getOffHeapSize=0, getCellsCount=5 at 1733260877640Flushing stores of hbase:meta,,1.1588230740 at 1733260877643 (+3 ms)Flushing 1588230740/info: creating writer at 1733260877643Flushing 1588230740/info: appending metadata at 1733260877646 (+3 ms)Flushing 1588230740/info: closing flushed file at 1733260877646Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43363ae9: reopening flushed file at 1733260877667 (+21 ms)Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 37ms, sequenceid=21, compaction requested=false at 1733260877676 (+9 ms)Writing region close event to WAL at 1733260877683 (+7 ms)Running coprocessor post-close hooks at 1733260877688 (+5 ms)Closed at 1733260877688 2024-12-03T21:21:17,688 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:21:17,696 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/data/default/TestLogRolling-testLogRolling/db378a0bdb82104b38e97404c19dfca3/recovered.edits/89.seqid, newMaxSeqId=89, maxSeqId=85 2024-12-03T21:21:17,697 INFO [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:21:17,697 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for db378a0bdb82104b38e97404c19dfca3: Waiting for close lock at 1733260877681Running coprocessor pre-close hooks at 1733260877681Disabling compacts and flushes for region at 1733260877681Disabling writes for close at 1733260877681Writing region close event to WAL at 1733260877691 (+10 ms)Running coprocessor post-close hooks at 1733260877697 (+6 ms)Closed at 1733260877697 2024-12-03T21:21:17,697 DEBUG [RS_CLOSE_REGION-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733260853198.db378a0bdb82104b38e97404c19dfca3. 2024-12-03T21:21:17,831 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:21:17,839 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,46815,1733260829446; all regions closed. 2024-12-03T21:21:17,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741834_1010 (size=8107) 2024-12-03T21:21:17,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741834_1010 (size=8107) 2024-12-03T21:21:17,846 DEBUG [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/oldWALs 2024-12-03T21:21:17,846 INFO [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C46815%2C1733260829446.meta:.meta(num 1733260830391) 2024-12-03T21:21:17,847 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,847 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,847 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,847 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:17,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741878_1054 (size=778) 2024-12-03T21:21:17,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741878_1054 (size=778) 2024-12-03T21:21:17,854 DEBUG [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/oldWALs 2024-12-03T21:21:17,854 INFO [RS:0;101545f66cbd:46815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C46815%2C1733260829446:(num 1733260877568) 2024-12-03T21:21:17,854 DEBUG [RS:0;101545f66cbd:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:17,854 INFO [RS:0;101545f66cbd:46815 {}] regionserver.LeaseManager(133): Closed 
leases 2024-12-03T21:21:17,855 INFO [RS:0;101545f66cbd:46815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:21:17,855 INFO [RS:0;101545f66cbd:46815 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T21:21:17,855 INFO [RS:0;101545f66cbd:46815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:21:17,855 INFO [RS:0;101545f66cbd:46815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46815 2024-12-03T21:21:17,856 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T21:21:17,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,46815,1733260829446 2024-12-03T21:21:17,907 INFO [RS:0;101545f66cbd:46815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:21:17,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:21:17,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:17,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:17,969 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,46815,1733260829446] 2024-12-03T21:21:18,062 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,46815,1733260829446 already deleted, retry=false 2024-12-03T21:21:18,062 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,46815,1733260829446 expired; onlineServers=0 2024-12-03T21:21:18,062 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,36423,1733260829269' ***** 2024-12-03T21:21:18,062 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:21:18,062 INFO [M:0;101545f66cbd:36423 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:21:18,062 INFO [M:0;101545f66cbd:36423 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:21:18,063 DEBUG [M:0;101545f66cbd:36423 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:21:18,063 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-03T21:21:18,063 DEBUG [M:0;101545f66cbd:36423 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:21:18,063 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260829773 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260829773,5,FailOnTimeoutGroup] 2024-12-03T21:21:18,063 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260829774 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260829774,5,FailOnTimeoutGroup] 2024-12-03T21:21:18,063 INFO [M:0;101545f66cbd:36423 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:21:18,063 INFO [M:0;101545f66cbd:36423 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:21:18,063 DEBUG [M:0;101545f66cbd:36423 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:21:18,063 INFO [M:0;101545f66cbd:36423 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:21:18,063 INFO [M:0;101545f66cbd:36423 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:21:18,063 INFO [M:0;101545f66cbd:36423 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:21:18,063 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:21:18,069 INFO [RS:0;101545f66cbd:46815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:21:18,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:18,069 INFO [RS:0;101545f66cbd:46815 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,46815,1733260829446; zookeeper connection closed. 
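The RegionServerTracker entry above fires because the server's ephemeral znode under /hbase/rs was deleted when its ZooKeeper session closed. A rough illustration of watching for that kind of deletion with the plain ZooKeeper client follows; the connect string and znode path are placeholders echoing the log, and HBase's tracker does considerably more than this sketch.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsEphemeralWatch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum; a real caller would pass its own connect string and session timeout.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59875", 30_000, event -> { });
    String rsNode = "/hbase/rs/example-regionserver";
    // exists() registers a one-shot watch; it must be re-set after each event it delivers.
    zk.exists(rsNode, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("RegionServer znode deleted, treat server as expired: " + event.getPath());
      }
    });
    Thread.sleep(5_000); // keep the session open briefly so the demo can observe an event
    zk.close();
  }
}
```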
2024-12-03T21:21:18,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1019e5b14290001, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:18,070 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f7d0df6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f7d0df6 2024-12-03T21:21:18,070 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:21:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:21:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:18,118 DEBUG [M:0;101545f66cbd:36423 {}] zookeeper.ZKUtil(347): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T21:21:18,118 WARN [M:0;101545f66cbd:36423 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T21:21:18,119 INFO [M:0;101545f66cbd:36423 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/.lastflushedseqids 2024-12-03T21:21:18,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741880_1056 (size=228) 2024-12-03T21:21:18,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741880_1056 (size=228) 2024-12-03T21:21:18,132 INFO [M:0;101545f66cbd:36423 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:21:18,132 INFO [M:0;101545f66cbd:36423 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:21:18,132 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:21:18,132 INFO [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:18,132 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:18,132 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:21:18,132 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:21:18,133 INFO [M:0;101545f66cbd:36423 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-12-03T21:21:18,146 DEBUG [M:0;101545f66cbd:36423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac51582975944cd1afbef10aaedaa355 is 82, key is hbase:meta,,1/info:regioninfo/1733260830414/Put/seqid=0 2024-12-03T21:21:18,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741881_1057 (size=5672) 2024-12-03T21:21:18,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741881_1057 (size=5672) 2024-12-03T21:21:18,151 INFO [M:0;101545f66cbd:36423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac51582975944cd1afbef10aaedaa355 2024-12-03T21:21:18,172 DEBUG [M:0;101545f66cbd:36423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c4fa1a035b948b6aa436a8ec2e1d351 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733260830965/Put/seqid=0 2024-12-03T21:21:18,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741882_1058 (size=7090) 2024-12-03T21:21:18,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741882_1058 (size=7090) 2024-12-03T21:21:18,176 INFO [M:0;101545f66cbd:36423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c4fa1a035b948b6aa436a8ec2e1d351 2024-12-03T21:21:18,180 INFO [M:0;101545f66cbd:36423 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8c4fa1a035b948b6aa436a8ec2e1d351 2024-12-03T21:21:18,195 DEBUG [M:0;101545f66cbd:36423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/192384ceddd34ae9955fa0987221b625 is 69, key is 101545f66cbd,46815,1733260829446/rs:state/1733260829809/Put/seqid=0 2024-12-03T21:21:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741883_1059 (size=5156) 2024-12-03T21:21:18,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741883_1059 (size=5156) 2024-12-03T21:21:18,200 INFO [M:0;101545f66cbd:36423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/192384ceddd34ae9955fa0987221b625 2024-12-03T21:21:18,216 DEBUG [M:0;101545f66cbd:36423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24c6eabfae434ec99bd910c026262361 is 52, key is load_balancer_on/state:d/1733260830589/Put/seqid=0 2024-12-03T21:21:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741884_1060 (size=5056) 2024-12-03T21:21:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741884_1060 (size=5056) 2024-12-03T21:21:18,221 INFO [M:0;101545f66cbd:36423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24c6eabfae434ec99bd910c026262361 2024-12-03T21:21:18,225 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ac51582975944cd1afbef10aaedaa355 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ac51582975944cd1afbef10aaedaa355 2024-12-03T21:21:18,229 INFO [M:0;101545f66cbd:36423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ac51582975944cd1afbef10aaedaa355, entries=8, sequenceid=125, filesize=5.5 K 2024-12-03T21:21:18,229 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c4fa1a035b948b6aa436a8ec2e1d351 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8c4fa1a035b948b6aa436a8ec2e1d351 2024-12-03T21:21:18,233 INFO [M:0;101545f66cbd:36423 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8c4fa1a035b948b6aa436a8ec2e1d351 2024-12-03T21:21:18,233 INFO [M:0;101545f66cbd:36423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8c4fa1a035b948b6aa436a8ec2e1d351, entries=13, sequenceid=125, filesize=6.9 K 2024-12-03T21:21:18,234 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/192384ceddd34ae9955fa0987221b625 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/192384ceddd34ae9955fa0987221b625 
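Each column family above is flushed to a file under the region's .tmp directory and then committed by renaming it into the family directory, so readers never see a partially written store file. A bare-bones sketch of that write-then-rename pattern with the Hadoop FileSystem API (paths are hypothetical and the payload is a stand-in; the real flush writes HFiles, not text):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/hbase/MasterData/data/master/store/region/.tmp/info/flushfile");
    Path committed = new Path("/hbase/MasterData/data/master/store/region/info/flushfile");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here"); // stand-in for real HFile bytes
    }
    fs.mkdirs(committed.getParent());
    // The rename is the commit step; until it succeeds the file is invisible under the family dir.
    if (!fs.rename(tmp, committed)) {
      throw new java.io.IOException("commit failed for " + committed);
    }
  }
}
```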
2024-12-03T21:21:18,238 INFO [M:0;101545f66cbd:36423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/192384ceddd34ae9955fa0987221b625, entries=1, sequenceid=125, filesize=5.0 K 2024-12-03T21:21:18,238 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24c6eabfae434ec99bd910c026262361 as hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24c6eabfae434ec99bd910c026262361 2024-12-03T21:21:18,242 INFO [M:0;101545f66cbd:36423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43303/user/jenkins/test-data/362981e4-ccd1-c41e-60c3-aa529a8a6c8b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24c6eabfae434ec99bd910c026262361, entries=1, sequenceid=125, filesize=4.9 K 2024-12-03T21:21:18,243 INFO [M:0;101545f66cbd:36423 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false 2024-12-03T21:21:18,244 INFO [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:18,245 DEBUG [M:0;101545f66cbd:36423 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260878132Disabling compacts and flushes for region at 1733260878132Disabling writes for close at 1733260878132Obtaining lock to block concurrent updates at 1733260878133 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260878133Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1733260878133Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733260878133Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260878134 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260878146 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260878146Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260878155 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260878171 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260878171Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260878180 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260878194 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260878194Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260878204 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260878216 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260878216Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19ecf75a: reopening flushed file at 1733260878224 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bc0c45b: reopening flushed file at 1733260878229 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3553b1ec: reopening flushed file at 1733260878233 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f791aff: reopening flushed file at 1733260878238 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false at 1733260878243 (+5 ms)Writing region close event to WAL at 1733260878244 (+1 ms)Closed at 1733260878244 2024-12-03T21:21:18,245 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:18,245 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:18,245 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:18,245 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:18,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741830_1006 (size=61320) 2024-12-03T21:21:18,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33457 is added to blk_1073741830_1006 (size=61320) 2024-12-03T21:21:18,247 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T21:21:18,247 INFO [M:0;101545f66cbd:36423 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T21:21:18,248 INFO [M:0;101545f66cbd:36423 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36423 2024-12-03T21:21:18,248 INFO [M:0;101545f66cbd:36423 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:21:18,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:18,407 INFO [M:0;101545f66cbd:36423 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:21:18,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36423-0x1019e5b14290000, quorum=127.0.0.1:59875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:18,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29b0734d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:18,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ea981d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:18,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:18,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78cfb61d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:18,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec1477d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:18,417 WARN [BP-1637139593-172.17.0.2-1733260827364 heartbeating to localhost/127.0.0.1:43303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:21:18,417 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:21:18,417 WARN [BP-1637139593-172.17.0.2-1733260827364 heartbeating to localhost/127.0.0.1:43303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1637139593-172.17.0.2-1733260827364 (Datanode Uuid 04cfc471-2499-4a0a-bbaa-5aabf7de1124) service to localhost/127.0.0.1:43303 2024-12-03T21:21:18,417 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:21:18,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data3/current/BP-1637139593-172.17.0.2-1733260827364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:18,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data4/current/BP-1637139593-172.17.0.2-1733260827364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:18,418 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:21:18,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4aa2d315{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:18,425 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2079ccf2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:18,425 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:18,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@148b6f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:18,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c8f3fae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:18,427 WARN [BP-1637139593-172.17.0.2-1733260827364 heartbeating to localhost/127.0.0.1:43303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:21:18,427 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:21:18,427 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:21:18,427 WARN [BP-1637139593-172.17.0.2-1733260827364 heartbeating to localhost/127.0.0.1:43303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1637139593-172.17.0.2-1733260827364 (Datanode Uuid 6d8216db-721c-4cc5-9911-7ed327a974cf) service to localhost/127.0.0.1:43303 2024-12-03T21:21:18,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data1/current/BP-1637139593-172.17.0.2-1733260827364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:18,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/cluster_a65c1612-ba9e-9907-ee69-5b7a4f9b8525/data/data2/current/BP-1637139593-172.17.0.2-1733260827364 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:18,428 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:21:18,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fd0bcc5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:21:18,433 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30591dbb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:18,433 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:18,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40e58970{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:18,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55e9d4da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:18,439 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:21:18,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:21:18,504 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 207) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43303 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43303 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43303 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:43303 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:43303 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=269 (was 146) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4836 (was 3038) - AvailableMemoryMB LEAK? 
- 2024-12-03T21:21:18,513 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=269, ProcessCount=11, AvailableMemoryMB=4836 2024-12-03T21:21:18,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.log.dir so I do NOT create it in target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/46d898f1-a85b-25cd-7f0f-a43c3b741095/hadoop.tmp.dir so I do NOT create it in target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927, deleteOnExit=true 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/test.cache.data in system properties and HBase conf 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir in system properties and HBase conf 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T21:21:18,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T21:21:18,515 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T21:21:18,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/nfs.dump.dir in system properties and HBase conf 2024-12-03T21:21:18,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/java.io.tmpdir in system properties and HBase conf 2024-12-03T21:21:18,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T21:21:18,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T21:21:18,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T21:21:18,530 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:21:18,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:18,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:19,021 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:21:19,025 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:21:19,027 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:21:19,027 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:21:19,027 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:21:19,032 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:21:19,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48d6dbcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:21:19,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fee4cec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:21:19,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52e2305b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/java.io.tmpdir/jetty-localhost-45513-hadoop-hdfs-3_4_1-tests_jar-_-any-12743393753690544191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:21:19,123 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d4c84e7{HTTP/1.1, (http/1.1)}{localhost:45513} 2024-12-03T21:21:19,124 INFO [Time-limited test {}] server.Server(415): Started @292221ms 2024-12-03T21:21:19,134 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T21:21:19,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:21:19,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:21:19,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:21:19,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:21:19,353 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T21:21:19,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58de1e11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:21:19,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3353f5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:21:19,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f07cd2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/java.io.tmpdir/jetty-localhost-41553-hadoop-hdfs-3_4_1-tests_jar-_-any-8947436905940756548/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:19,452 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cdbdfd7{HTTP/1.1, (http/1.1)}{localhost:41553} 2024-12-03T21:21:19,452 INFO [Time-limited test {}] server.Server(415): Started @292550ms 2024-12-03T21:21:19,453 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:21:19,477 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T21:21:19,479 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T21:21:19,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T21:21:19,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T21:21:19,480 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T21:21:19,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4612fdce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,AVAILABLE} 2024-12-03T21:21:19,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a40f8b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T21:21:19,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fbadfdb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/java.io.tmpdir/jetty-localhost-46165-hadoop-hdfs-3_4_1-tests_jar-_-any-12253483189437865932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:19,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b64ea29{HTTP/1.1, (http/1.1)}{localhost:46165} 2024-12-03T21:21:19,572 INFO [Time-limited test {}] server.Server(415): Started @292669ms 2024-12-03T21:21:19,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T21:21:19,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:19,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T21:21:20,325 WARN [Thread-2466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data1/current/BP-1690931866-172.17.0.2-1733260878533/current, will proceed with Du for space computation calculation, 2024-12-03T21:21:20,326 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data2/current/BP-1690931866-172.17.0.2-1733260878533/current, will proceed with Du for space computation calculation, 2024-12-03T21:21:20,347 WARN [Thread-2430 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:21:20,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16ee4fff355535f with lease ID 0x2c1cd37267add737: Processing first storage report for DS-5064463d-5ce4-4f27-902d-e796a923c4d4 from datanode DatanodeRegistration(127.0.0.1:33681, datanodeUuid=5228981b-8f53-4408-82fc-b3e94a1600a9, infoPort=42095, infoSecurePort=0, ipcPort=41165, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533) 2024-12-03T21:21:20,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16ee4fff355535f with lease ID 0x2c1cd37267add737: from storage DS-5064463d-5ce4-4f27-902d-e796a923c4d4 node DatanodeRegistration(127.0.0.1:33681, datanodeUuid=5228981b-8f53-4408-82fc-b3e94a1600a9, infoPort=42095, infoSecurePort=0, ipcPort=41165, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:21:20,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16ee4fff355535f with lease ID 0x2c1cd37267add737: Processing first storage report for DS-3820f458-c88e-42f9-aebe-8ad4c8cbf576 from datanode DatanodeRegistration(127.0.0.1:33681, datanodeUuid=5228981b-8f53-4408-82fc-b3e94a1600a9, infoPort=42095, infoSecurePort=0, ipcPort=41165, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533) 2024-12-03T21:21:20,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16ee4fff355535f with lease ID 0x2c1cd37267add737: from storage DS-3820f458-c88e-42f9-aebe-8ad4c8cbf576 node DatanodeRegistration(127.0.0.1:33681, datanodeUuid=5228981b-8f53-4408-82fc-b3e94a1600a9, infoPort=42095, infoSecurePort=0, ipcPort=41165, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:21:20,667 WARN [Thread-2477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data3/current/BP-1690931866-172.17.0.2-1733260878533/current, will proceed with Du for space computation calculation, 2024-12-03T21:21:20,667 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data4/current/BP-1690931866-172.17.0.2-1733260878533/current, will proceed with Du for space computation calculation, 2024-12-03T21:21:20,684 WARN [Thread-2453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T21:21:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4576deb994366b0a with lease ID 0x2c1cd37267add738: Processing first storage report for DS-b7459d65-d3a5-42cb-bff3-3031b132f7ef from datanode DatanodeRegistration(127.0.0.1:37733, datanodeUuid=1d414bad-d6d6-4dab-b660-c9bdebb99c40, infoPort=32963, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533) 2024-12-03T21:21:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4576deb994366b0a with lease ID 0x2c1cd37267add738: from storage DS-b7459d65-d3a5-42cb-bff3-3031b132f7ef node DatanodeRegistration(127.0.0.1:37733, datanodeUuid=1d414bad-d6d6-4dab-b660-c9bdebb99c40, infoPort=32963, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:21:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4576deb994366b0a with lease ID 0x2c1cd37267add738: Processing first storage report for DS-da69072f-a5fc-4efa-b405-5b5343a89af7 from datanode DatanodeRegistration(127.0.0.1:37733, datanodeUuid=1d414bad-d6d6-4dab-b660-c9bdebb99c40, infoPort=32963, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533) 2024-12-03T21:21:20,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4576deb994366b0a with lease ID 0x2c1cd37267add738: from storage DS-da69072f-a5fc-4efa-b405-5b5343a89af7 node DatanodeRegistration(127.0.0.1:37733, datanodeUuid=1d414bad-d6d6-4dab-b660-c9bdebb99c40, infoPort=32963, infoSecurePort=0, ipcPort=33581, storageInfo=lv=-57;cid=testClusterID;nsid=524693050;c=1733260878533), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T21:21:20,697 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458 2024-12-03T21:21:20,703 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/zookeeper_0, clientPort=54429, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T21:21:20,705 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54429 2024-12-03T21:21:20,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:21:20,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741825_1001 (size=7) 2024-12-03T21:21:20,721 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477 with version=8 2024-12-03T21:21:20,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43101/user/jenkins/test-data/27c430b4-3072-5762-34a6-c3d023af80f6/hbase-staging 2024-12-03T21:21:20,723 INFO [Time-limited test {}] client.ConnectionUtils(128): master/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:21:20,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,723 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:21:20,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:21:20,724 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T21:21:20,724 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:21:20,724 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46879 2024-12-03T21:21:20,725 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46879 connecting to ZooKeeper ensemble=127.0.0.1:54429 2024-12-03T21:21:20,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:468790x0, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-03T21:21:20,794 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46879-0x1019e5bdd240000 connected 2024-12-03T21:21:20,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,860 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:20,862 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477, hbase.cluster.distributed=false 2024-12-03T21:21:20,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:21:20,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46879 2024-12-03T21:21:20,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46879 2024-12-03T21:21:20,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46879 2024-12-03T21:21:20,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46879 2024-12-03T21:21:20,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46879 2024-12-03T21:21:20,876 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/101545f66cbd:0 server-side Connection retries=45 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T21:21:20,877 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T21:21:20,877 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34287 2024-12-03T21:21:20,878 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34287 connecting to ZooKeeper ensemble=127.0.0.1:54429 2024-12-03T21:21:20,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342870x0, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T21:21:20,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34287-0x1019e5bdd240001 connected 2024-12-03T21:21:20,893 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:20,893 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T21:21:20,893 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T21:21:20,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T21:21:20,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T21:21:20,895 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34287 2024-12-03T21:21:20,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34287 2024-12-03T21:21:20,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34287 2024-12-03T21:21:20,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34287 2024-12-03T21:21:20,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34287 2024-12-03T21:21:20,912 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;101545f66cbd:46879 2024-12-03T21:21:20,912 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/101545f66cbd,46879,1733260880723 2024-12-03T21:21:20,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:21:20,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:21:20,917 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/101545f66cbd,46879,1733260880723 2024-12-03T21:21:20,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T21:21:20,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:20,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:20,926 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T21:21:20,926 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/101545f66cbd,46879,1733260880723 from backup master directory 2024-12-03T21:21:20,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:21:20,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/101545f66cbd,46879,1733260880723 2024-12-03T21:21:20,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T21:21:20,934 WARN [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
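The master startup entries above show the election dance: the new master first registers itself under /hbase/backup-masters, sets a watch on /hbase/master, and, once it wins the active role, deletes its own backup-masters znode. The sketch below illustrates that pattern with the plain ZooKeeper client API; the paths and server name are taken from the log for illustration only, and HBase's actual ActiveMasterManager adds watch handling, retries, and a recoverable-ZooKeeper wrapper that are omitted here.

```java
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import java.nio.charset.StandardCharsets;

// Minimal sketch of the backup-master / active-master registration seen in the log.
// Not HBase's implementation; just the znode choreography.
public class ActiveMasterSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:54429";                        // clientPort from the log
    String serverName = "101545f66cbd,46879,1733260880723";   // example server name
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* watcher: log events */ });
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);

    // 1. Advertise ourselves as a backup master (ephemeral: vanishes if the JVM dies).
    zk.create("/hbase/backup-masters/" + serverName, data,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // 2. Try to become the active master by creating /hbase/master.
    //    Exactly one contender succeeds; the others keep watching the znode.
    try {
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // 3. We won: drop our backup-masters entry, mirroring the
      //    "Deleting ZNode ... from backup master directory" message above.
      Stat stat = zk.exists("/hbase/backup-masters/" + serverName, false);
      if (stat != null) {
        zk.delete("/hbase/backup-masters/" + serverName, stat.getVersion());
      }
    } catch (KeeperException.NodeExistsException e) {
      // Another master is already active; stay a backup and watch /hbase/master.
      zk.exists("/hbase/master", true);
    }
    zk.close();
  }
}
```

The "Set watcher on znode that does not yet exist" lines in the log correspond to step 2's losing branch: the process keeps an exists-watch on /hbase/master so it is notified if the active master's ephemeral znode disappears.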
2024-12-03T21:21:20,934 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=101545f66cbd,46879,1733260880723 2024-12-03T21:21:20,937 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/hbase.id] with ID: 9b32e2d4-2a97-459e-bce4-044605729345 2024-12-03T21:21:20,937 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/.tmp/hbase.id 2024-12-03T21:21:20,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:20,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:20,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:21:20,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741826_1002 (size=42) 2024-12-03T21:21:20,943 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/.tmp/hbase.id]:[hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/hbase.id] 2024-12-03T21:21:20,954 INFO [master/101545f66cbd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:20,954 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T21:21:20,955 INFO [master/101545f66cbd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-03T21:21:20,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:20,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:20,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:21:20,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741827_1003 (size=196) 2024-12-03T21:21:20,974 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T21:21:20,975 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T21:21:20,975 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:21:20,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:21:20,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741828_1004 (size=1189) 2024-12-03T21:21:20,985 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store 2024-12-03T21:21:20,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:21:20,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741829_1005 (size=34) 2024-12-03T21:21:20,990 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:21:20,991 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:20,991 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260880991Disabling compacts and flushes for region at 1733260880991Disabling writes for close at 1733260880991Writing region close event to WAL at 1733260880991Closed at 1733260880991 2024-12-03T21:21:20,991 WARN [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/.initializing 2024-12-03T21:21:20,991 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/WALs/101545f66cbd,46879,1733260880723 2024-12-03T21:21:20,993 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C46879%2C1733260880723, suffix=, logDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/WALs/101545f66cbd,46879,1733260880723, archiveDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/oldWALs, maxLogs=10 2024-12-03T21:21:20,994 INFO [master/101545f66cbd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C46879%2C1733260880723.1733260880994 2024-12-03T21:21:20,998 INFO [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/WALs/101545f66cbd,46879,1733260880723/101545f66cbd%2C46879%2C1733260880723.1733260880994 2024-12-03T21:21:20,999 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42095:42095),(127.0.0.1/127.0.0.1:32963:32963)] 2024-12-03T21:21:21,003 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:21:21,003 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:21:21,003 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,004 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T21:21:21,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T21:21:21,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:21:21,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T21:21:21,009 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:21:21,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T21:21:21,010 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T21:21:21,010 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,011 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,011 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,012 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,012 DEBUG [master/101545f66cbd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,013 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T21:21:21,014 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T21:21:21,015 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:21:21,016 INFO [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749993, jitterRate=-0.04633568227291107}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T21:21:21,016 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733260881004Initializing all the Stores at 1733260881004Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881004Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260881005 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260881005Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260881005Cleaning up temporary data from old regions at 1733260881012 (+7 ms)Region opened successfully at 1733260881016 (+4 ms) 2024-12-03T21:21:21,016 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T21:21:21,021 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b04bb87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:21:21,022 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T21:21:21,023 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T21:21:21,023 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T21:21:21,023 INFO [master/101545f66cbd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T21:21:21,023 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T21:21:21,024 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T21:21:21,024 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T21:21:21,026 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T21:21:21,028 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T21:21:21,033 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T21:21:21,034 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T21:21:21,034 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T21:21:21,042 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T21:21:21,042 INFO [master/101545f66cbd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T21:21:21,043 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T21:21:21,050 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T21:21:21,051 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T21:21:21,059 DEBUG 
[master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T21:21:21,061 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T21:21:21,067 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T21:21:21,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:21,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:21,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,076 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=101545f66cbd,46879,1733260880723, sessionid=0x1019e5bdd240000, setting cluster-up flag (Was=false) 2024-12-03T21:21:21,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,125 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T21:21:21,127 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,46879,1733260880723 2024-12-03T21:21:21,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,167 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T21:21:21,168 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=101545f66cbd,46879,1733260880723 2024-12-03T21:21:21,169 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T21:21:21,171 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T21:21:21,171 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T21:21:21,171 INFO [master/101545f66cbd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T21:21:21,172 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 101545f66cbd,46879,1733260880723 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/101545f66cbd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/101545f66cbd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:21:21,173 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/101545f66cbd:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733260911175 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T21:21:21,175 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T21:21:21,176 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:21:21,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T21:21:21,177 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,177 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T21:21:21,179 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,180 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T21:21:21,180 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T21:21:21,180 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T21:21:21,181 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260881181,5,FailOnTimeoutGroup] 2024-12-03T21:21:21,181 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260881181,5,FailOnTimeoutGroup] 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,181 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-03T21:21:21,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:21:21,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741831_1007 (size=1321) 2024-12-03T21:21:21,186 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T21:21:21,186 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477 2024-12-03T21:21:21,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:21:21,203 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(746): ClusterId : 9b32e2d4-2a97-459e-bce4-044605729345 2024-12-03T21:21:21,204 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T21:21:21,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741832_1008 (size=32) 2024-12-03T21:21:21,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:21:21,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family info of region 1588230740 2024-12-03T21:21:21,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:21:21,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:21:21,210 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T21:21:21,210 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T21:21:21,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:21:21,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:21:21,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:21:21,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:21:21,215 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:21:21,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,215 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:21:21,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740 2024-12-03T21:21:21,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740 2024-12-03T21:21:21,218 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T21:21:21,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:21:21,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning 
up temporary data for 1588230740 2024-12-03T21:21:21,218 DEBUG [RS:0;101545f66cbd:34287 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4075eb4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=101545f66cbd/172.17.0.2:0 2024-12-03T21:21:21,218 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:21:21,219 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:21:21,224 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T21:21:21,224 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767235, jitterRate=-0.024411067366600037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:21:21,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733260881204Initializing all the Stores at 1733260881205 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881205Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881207 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260881207Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881207Cleaning up temporary data from old regions at 1733260881218 (+11 ms)Region opened successfully at 1733260881225 (+7 ms) 2024-12-03T21:21:21,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:21:21,225 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:21:21,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 
2024-12-03T21:21:21,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:21:21,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:21:21,227 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:21:21,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260881225Disabling compacts and flushes for region at 1733260881225Disabling writes for close at 1733260881226 (+1 ms)Writing region close event to WAL at 1733260881226Closed at 1733260881226 2024-12-03T21:21:21,228 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:21:21,228 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T21:21:21,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T21:21:21,229 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:21:21,231 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T21:21:21,234 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;101545f66cbd:34287 2024-12-03T21:21:21,234 INFO [RS:0;101545f66cbd:34287 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T21:21:21,234 INFO [RS:0;101545f66cbd:34287 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T21:21:21,234 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T21:21:21,235 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(2659): reportForDuty to master=101545f66cbd,46879,1733260880723 with port=34287, startcode=1733260880876 2024-12-03T21:21:21,235 DEBUG [RS:0;101545f66cbd:34287 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T21:21:21,238 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52059, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T21:21:21,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46879 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46879 {}] master.ServerManager(517): Registering regionserver=101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,240 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477 2024-12-03T21:21:21,240 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38593 2024-12-03T21:21:21,240 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T21:21:21,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:21:21,251 DEBUG [RS:0;101545f66cbd:34287 {}] zookeeper.ZKUtil(111): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,251 WARN [RS:0;101545f66cbd:34287 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T21:21:21,251 INFO [RS:0;101545f66cbd:34287 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:21:21,251 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [101545f66cbd,34287,1733260880876] 2024-12-03T21:21:21,255 INFO [RS:0;101545f66cbd:34287 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T21:21:21,256 INFO [RS:0;101545f66cbd:34287 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T21:21:21,256 INFO [RS:0;101545f66cbd:34287 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T21:21:21,256 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T21:21:21,256 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T21:21:21,257 INFO [RS:0;101545f66cbd:34287 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T21:21:21,257 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/101545f66cbd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/101545f66cbd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:21:21,257 DEBUG [RS:0;101545f66cbd:34287 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/101545f66cbd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,259 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,34287,1733260880876-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:21:21,273 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T21:21:21,274 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,34287,1733260880876-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,274 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,274 INFO [RS:0;101545f66cbd:34287 {}] regionserver.Replication(171): 101545f66cbd,34287,1733260880876 started 2024-12-03T21:21:21,289 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,289 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1482): Serving as 101545f66cbd,34287,1733260880876, RpcServer on 101545f66cbd/172.17.0.2:34287, sessionid=0x1019e5bdd240001 2024-12-03T21:21:21,289 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T21:21:21,289 DEBUG [RS:0;101545f66cbd:34287 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,289 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,34287,1733260880876' 2024-12-03T21:21:21,289 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '101545f66cbd,34287,1733260880876' 2024-12-03T21:21:21,290 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T21:21:21,291 DEBUG 
[RS:0;101545f66cbd:34287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T21:21:21,291 DEBUG [RS:0;101545f66cbd:34287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T21:21:21,291 INFO [RS:0;101545f66cbd:34287 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T21:21:21,291 INFO [RS:0;101545f66cbd:34287 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T21:21:21,381 WARN [101545f66cbd:46879 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T21:21:21,393 INFO [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C34287%2C1733260880876, suffix=, logDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/101545f66cbd,34287,1733260880876, archiveDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs, maxLogs=32 2024-12-03T21:21:21,393 INFO [RS:0;101545f66cbd:34287 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C34287%2C1733260880876.1733260881393 2024-12-03T21:21:21,399 INFO [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/101545f66cbd,34287,1733260880876/101545f66cbd%2C34287%2C1733260880876.1733260881393 2024-12-03T21:21:21,403 DEBUG [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32963:32963),(127.0.0.1/127.0.0.1:42095:42095)] 2024-12-03T21:21:21,631 DEBUG [101545f66cbd:46879 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T21:21:21,632 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,633 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,34287,1733260880876, state=OPENING 2024-12-03T21:21:21,658 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T21:21:21,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:21,667 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:21:21,667 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T21:21:21,667 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:21:21,667 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,34287,1733260880876}] 2024-12-03T21:21:21,820 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T21:21:21,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T21:21:21,826 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T21:21:21,826 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:21:21,828 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=101545f66cbd%2C34287%2C1733260880876.meta, suffix=.meta, logDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/101545f66cbd,34287,1733260880876, archiveDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs, maxLogs=32 2024-12-03T21:21:21,828 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 101545f66cbd%2C34287%2C1733260880876.meta.1733260881828.meta 2024-12-03T21:21:21,833 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/101545f66cbd,34287,1733260880876/101545f66cbd%2C34287%2C1733260880876.meta.1733260881828.meta 2024-12-03T21:21:21,834 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32963:32963),(127.0.0.1/127.0.0.1:42095:42095)] 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T21:21:21,835 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T21:21:21,835 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T21:21:21,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T21:21:21,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T21:21:21,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T21:21:21,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T21:21:21,838 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T21:21:21,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T21:21:21,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T21:21:21,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T21:21:21,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T21:21:21,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T21:21:21,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T21:21:21,841 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T21:21:21,842 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740 2024-12-03T21:21:21,842 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740 2024-12-03T21:21:21,843 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T21:21:21,843 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T21:21:21,844 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T21:21:21,845 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T21:21:21,845 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770864, jitterRate=-0.01979658007621765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T21:21:21,845 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T21:21:21,846 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733260881835Writing region info on filesystem at 1733260881835Initializing all the Stores at 1733260881836 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881836Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881836Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733260881836Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733260881836Cleaning up temporary data from old regions at 1733260881843 (+7 ms)Running coprocessor post-open hooks at 1733260881845 (+2 ms)Region opened successfully at 1733260881846 (+1 ms) 2024-12-03T21:21:21,847 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733260881820 2024-12-03T21:21:21,849 DEBUG [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T21:21:21,849 INFO [RS_OPEN_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T21:21:21,850 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,850 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 101545f66cbd,34287,1733260880876, state=OPEN 2024-12-03T21:21:21,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:21:21,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T21:21:21,911 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=101545f66cbd,34287,1733260880876 2024-12-03T21:21:21,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:21:21,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T21:21:21,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T21:21:21,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=101545f66cbd,34287,1733260880876 in 244 msec 2024-12-03T21:21:21,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T21:21:21,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 686 msec 2024-12-03T21:21:21,918 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T21:21:21,918 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T21:21:21,919 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:21:21,920 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,34287,1733260880876, seqNum=-1] 2024-12-03T21:21:21,920 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:21:21,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39185, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:21:21,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 755 msec 2024-12-03T21:21:21,926 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733260881926, completionTime=-1 2024-12-03T21:21:21,927 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T21:21:21,927 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T21:21:21,929 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T21:21:21,929 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733260941929 2024-12-03T21:21:21,929 INFO [master/101545f66cbd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733261001929 2024-12-03T21:21:21,929 INFO [master/101545f66cbd:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-101545f66cbd:46879, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,930 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,932 DEBUG [master/101545f66cbd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T21:21:21,933 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.999sec 2024-12-03T21:21:21,933 INFO [master/101545f66cbd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T21:21:21,933 INFO [master/101545f66cbd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T21:21:21,933 INFO [master/101545f66cbd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T21:21:21,933 INFO [master/101545f66cbd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T21:21:21,934 INFO [master/101545f66cbd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T21:21:21,934 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T21:21:21,934 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T21:21:21,936 DEBUG [master/101545f66cbd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T21:21:21,936 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T21:21:21,936 INFO [master/101545f66cbd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=101545f66cbd,46879,1733260880723-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T21:21:21,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:21,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:22,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49fe510c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:21:22,003 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 101545f66cbd,46879,-1 for getting cluster id 2024-12-03T21:21:22,004 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T21:21:22,005 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b32e2d4-2a97-459e-bce4-044605729345' 2024-12-03T21:21:22,005 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T21:21:22,006 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b32e2d4-2a97-459e-bce4-044605729345" 2024-12-03T21:21:22,006 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7222f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:21:22,006 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [101545f66cbd,46879,-1] 2024-12-03T21:21:22,006 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T21:21:22,006 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,007 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42128, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T21:21:22,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@229a300d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T21:21:22,008 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T21:21:22,009 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=101545f66cbd,34287,1733260880876, seqNum=-1] 2024-12-03T21:21:22,009 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T21:21:22,009 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60876, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T21:21:22,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=101545f66cbd,46879,1733260880723 2024-12-03T21:21:22,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T21:21:22,013 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T21:21:22,013 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T21:21:22,015 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1, archiveDir=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs, maxLogs=32 2024-12-03T21:21:22,015 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733260882015 2024-12-03T21:21:22,020 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1/test.com%2C8080%2C1.1733260882015 2024-12-03T21:21:22,021 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42095:42095),(127.0.0.1/127.0.0.1:32963:32963)] 2024-12-03T21:21:22,022 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733260882022 2024-12-03T21:21:22,027 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,027 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,027 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,027 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,027 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1/test.com%2C8080%2C1.1733260882015 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1/test.com%2C8080%2C1.1733260882022 2024-12-03T21:21:22,028 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42095:42095),(127.0.0.1/127.0.0.1:32963:32963)] 2024-12-03T21:21:22,028 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1/test.com%2C8080%2C1.1733260882015 is not closed yet, will try archiving it next time 2024-12-03T21:21:22,028 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741835_1011 (size=93) 2024-12-03T21:21:22,029 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,029 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): 
interrupted 2024-12-03T21:21:22,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,029 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741835_1011 (size=93) 2024-12-03T21:21:22,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/WALs/test.com,8080,1/test.com%2C8080%2C1.1733260882015 to hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs/test.com%2C8080%2C1.1733260882015 2024-12-03T21:21:22,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741836_1012 (size=93) 2024-12-03T21:21:22,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741836_1012 (size=93) 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs 2024-12-03T21:21:22,034 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733260882022) 2024-12-03T21:21:22,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T21:21:22,034 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,034 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T21:21:22,034 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=724757396, stopped=false 2024-12-03T21:21:22,034 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=101545f66cbd,46879,1733260880723 2024-12-03T21:21:22,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:22,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T21:21:22,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:22,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:22,042 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:21:22,042 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T21:21:22,042 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:22,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,042 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:22,042 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '101545f66cbd,34287,1733260880876' ***** 2024-12-03T21:21:22,043 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T21:21:22,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T21:21:22,043 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(959): stopping server 101545f66cbd,34287,1733260880876 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;101545f66cbd:34287. 2024-12-03T21:21:22,043 DEBUG [RS:0;101545f66cbd:34287 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T21:21:22,043 DEBUG [RS:0;101545f66cbd:34287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T21:21:22,043 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T21:21:22,044 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T21:21:22,044 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T21:21:22,044 DEBUG [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T21:21:22,044 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T21:21:22,044 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T21:21:22,044 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T21:21:22,044 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T21:21:22,044 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T21:21:22,044 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T21:21:22,064 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/.tmp/ns/5a6882dc152f4e99bbae468cabb8f1f9 is 43, key is default/ns:d/1733260881922/Put/seqid=0 2024-12-03T21:21:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741837_1013 (size=5153) 2024-12-03T21:21:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741837_1013 (size=5153) 2024-12-03T21:21:22,068 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/.tmp/ns/5a6882dc152f4e99bbae468cabb8f1f9 2024-12-03T21:21:22,074 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/.tmp/ns/5a6882dc152f4e99bbae468cabb8f1f9 as hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/ns/5a6882dc152f4e99bbae468cabb8f1f9 2024-12-03T21:21:22,079 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/ns/5a6882dc152f4e99bbae468cabb8f1f9, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T21:21:22,080 INFO 
[RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false 2024-12-03T21:21:22,080 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T21:21:22,083 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T21:21:22,084 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T21:21:22,084 INFO [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T21:21:22,084 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733260882044Running coprocessor pre-close hooks at 1733260882044Disabling compacts and flushes for region at 1733260882044Disabling writes for close at 1733260882044Obtaining lock to block concurrent updates at 1733260882044Preparing flush snapshotting stores in 1588230740 at 1733260882044Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733260882044Flushing stores of hbase:meta,,1.1588230740 at 1733260882045 (+1 ms)Flushing 1588230740/ns: creating writer at 1733260882045Flushing 1588230740/ns: appending metadata at 1733260882063 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733260882063Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d2a5f2c: reopening flushed file at 1733260882073 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false at 1733260882080 (+7 ms)Writing region close event to WAL at 1733260882081 (+1 ms)Running coprocessor post-close hooks at 1733260882084 (+3 ms)Closed at 1733260882084 2024-12-03T21:21:22,084 DEBUG [RS_CLOSE_META-regionserver/101545f66cbd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T21:21:22,244 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(976): stopping server 101545f66cbd,34287,1733260880876; all regions closed. 
2024-12-03T21:21:22,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,245 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,245 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,245 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741834_1010 (size=1152) 2024-12-03T21:21:22,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741834_1010 (size=1152) 2024-12-03T21:21:22,255 DEBUG [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs 2024-12-03T21:21:22,255 INFO [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C34287%2C1733260880876.meta:.meta(num 1733260881828) 2024-12-03T21:21:22,256 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,256 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,256 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,256 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,256 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741833_1009 (size=93) 2024-12-03T21:21:22,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741833_1009 (size=93) 2024-12-03T21:21:22,263 DEBUG [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/oldWALs 2024-12-03T21:21:22,263 INFO [RS:0;101545f66cbd:34287 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 101545f66cbd%2C34287%2C1733260880876:(num 1733260881393) 2024-12-03T21:21:22,263 DEBUG [RS:0;101545f66cbd:34287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T21:21:22,263 INFO [RS:0;101545f66cbd:34287 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:21:22,264 INFO [RS:0;101545f66cbd:34287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:21:22,264 INFO [RS:0;101545f66cbd:34287 {}] hbase.ChoreService(370): Chore service for: regionserver/101545f66cbd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T21:21:22,264 INFO [RS:0;101545f66cbd:34287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:21:22,264 INFO [regionserver/101545f66cbd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T21:21:22,264 INFO [RS:0;101545f66cbd:34287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34287 2024-12-03T21:21:22,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/101545f66cbd,34287,1733260880876 2024-12-03T21:21:22,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T21:21:22,275 INFO [RS:0;101545f66cbd:34287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:21:22,276 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [101545f66cbd,34287,1733260880876] 2024-12-03T21:21:22,292 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/101545f66cbd,34287,1733260880876 already deleted, retry=false 2024-12-03T21:21:22,292 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 101545f66cbd,34287,1733260880876 expired; onlineServers=0 2024-12-03T21:21:22,292 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '101545f66cbd,46879,1733260880723' ***** 2024-12-03T21:21:22,292 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T21:21:22,292 INFO [M:0;101545f66cbd:46879 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T21:21:22,292 INFO [M:0;101545f66cbd:46879 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T21:21:22,292 DEBUG [M:0;101545f66cbd:46879 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T21:21:22,292 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T21:21:22,292 DEBUG [M:0;101545f66cbd:46879 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T21:21:22,292 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260881181 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.large.0-1733260881181,5,FailOnTimeoutGroup] 2024-12-03T21:21:22,293 DEBUG [master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260881181 {}] cleaner.HFileCleaner(306): Exit Thread[master/101545f66cbd:0:becomeActiveMaster-HFileCleaner.small.0-1733260881181,5,FailOnTimeoutGroup] 2024-12-03T21:21:22,293 INFO [M:0;101545f66cbd:46879 {}] hbase.ChoreService(370): Chore service for: master/101545f66cbd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T21:21:22,293 INFO [M:0;101545f66cbd:46879 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T21:21:22,293 DEBUG [M:0;101545f66cbd:46879 {}] master.HMaster(1795): Stopping service threads 2024-12-03T21:21:22,293 INFO [M:0;101545f66cbd:46879 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T21:21:22,293 INFO [M:0;101545f66cbd:46879 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T21:21:22,293 INFO [M:0;101545f66cbd:46879 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T21:21:22,293 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T21:21:22,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T21:21:22,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T21:21:22,301 INFO [M:0;101545f66cbd:46879 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/.lastflushedseqids 2024-12-03T21:21:22,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741838_1014 (size=99) 2024-12-03T21:21:22,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741838_1014 (size=99) 2024-12-03T21:21:22,307 INFO [M:0;101545f66cbd:46879 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T21:21:22,307 INFO [M:0;101545f66cbd:46879 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T21:21:22,307 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T21:21:22,307 INFO [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:22,307 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T21:21:22,307 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T21:21:22,307 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:22,308 INFO [M:0;101545f66cbd:46879 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T21:21:22,320 DEBUG [M:0;101545f66cbd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9c156ed58ed44908c12fc43810e0bf6 is 82, key is hbase:meta,,1/info:regioninfo/1733260881850/Put/seqid=0 2024-12-03T21:21:22,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741839_1015 (size=5672) 2024-12-03T21:21:22,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741839_1015 (size=5672) 2024-12-03T21:21:22,324 INFO [M:0;101545f66cbd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9c156ed58ed44908c12fc43810e0bf6 2024-12-03T21:21:22,341 DEBUG [M:0;101545f66cbd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f072c1719f234f72b779fdb6093dbd50 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733260881926/Put/seqid=0 2024-12-03T21:21:22,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741840_1016 (size=5275) 2024-12-03T21:21:22,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741840_1016 (size=5275) 2024-12-03T21:21:22,345 INFO [M:0;101545f66cbd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f072c1719f234f72b779fdb6093dbd50 2024-12-03T21:21:22,348 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-03T21:21:22,349 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-03T21:21:22,367 DEBUG [M:0;101545f66cbd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/176775a4cb494444947e9a54ab572092 is 69, key is 101545f66cbd,34287,1733260880876/rs:state/1733260881239/Put/seqid=0 2024-12-03T21:21:22,372 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741841_1017 (size=5156) 2024-12-03T21:21:22,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741841_1017 (size=5156) 2024-12-03T21:21:22,372 INFO [M:0;101545f66cbd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/176775a4cb494444947e9a54ab572092 2024-12-03T21:21:22,384 INFO [RS:0;101545f66cbd:34287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:21:22,384 INFO [RS:0;101545f66cbd:34287 {}] regionserver.HRegionServer(1031): Exiting; stopping=101545f66cbd,34287,1733260880876; zookeeper connection closed. 2024-12-03T21:21:22,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:22,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34287-0x1019e5bdd240001, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:22,387 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2700c5a1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2700c5a1 2024-12-03T21:21:22,387 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T21:21:22,395 DEBUG [M:0;101545f66cbd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ba30a908fb2f4899b165e075eb939126 is 52, key is load_balancer_on/state:d/1733260882012/Put/seqid=0 2024-12-03T21:21:22,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741842_1018 (size=5056) 2024-12-03T21:21:22,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741842_1018 (size=5056) 2024-12-03T21:21:22,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:22,809 INFO [M:0;101545f66cbd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ba30a908fb2f4899b165e075eb939126 2024-12-03T21:21:22,814 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9c156ed58ed44908c12fc43810e0bf6 as hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9c156ed58ed44908c12fc43810e0bf6 2024-12-03T21:21:22,820 INFO [M:0;101545f66cbd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9c156ed58ed44908c12fc43810e0bf6, entries=8, sequenceid=29, filesize=5.5 K 2024-12-03T21:21:22,822 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f072c1719f234f72b779fdb6093dbd50 as hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f072c1719f234f72b779fdb6093dbd50 2024-12-03T21:21:22,831 INFO [M:0;101545f66cbd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f072c1719f234f72b779fdb6093dbd50, entries=3, sequenceid=29, filesize=5.2 K 2024-12-03T21:21:22,832 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/176775a4cb494444947e9a54ab572092 as hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/176775a4cb494444947e9a54ab572092 2024-12-03T21:21:22,837 INFO [M:0;101545f66cbd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/176775a4cb494444947e9a54ab572092, entries=1, sequenceid=29, filesize=5.0 K 2024-12-03T21:21:22,838 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ba30a908fb2f4899b165e075eb939126 as hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ba30a908fb2f4899b165e075eb939126 2024-12-03T21:21:22,841 INFO [M:0;101545f66cbd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38593/user/jenkins/test-data/f1c4e80f-ee63-bd5d-77e6-0d84bc782477/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ba30a908fb2f4899b165e075eb939126, entries=1, sequenceid=29, filesize=4.9 K 2024-12-03T21:21:22,842 INFO [M:0;101545f66cbd:46879 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 535ms, sequenceid=29, compaction requested=false 2024-12-03T21:21:22,852 INFO [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T21:21:22,852 DEBUG [M:0;101545f66cbd:46879 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733260882307Disabling compacts and flushes for region at 1733260882307Disabling writes for close at 1733260882307Obtaining lock to block concurrent updates at 1733260882308 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733260882308Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733260882308Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733260882308Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733260882308Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733260882320 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733260882320Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733260882328 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733260882340 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733260882341 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733260882349 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733260882367 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733260882367Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733260882376 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733260882394 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733260882394Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4750493b: reopening flushed file at 1733260882813 (+419 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cb2ef0a: reopening flushed file at 1733260882821 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77cc2422: reopening flushed file at 1733260882831 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@790e9d1f: reopening flushed file at 1733260882837 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 535ms, sequenceid=29, compaction requested=false at 1733260882842 (+5 ms)Writing region close event to WAL at 1733260882852 (+10 ms)Closed at 1733260882852 2024-12-03T21:21:22,855 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,855 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,856 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,856 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-03T21:21:22,856 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T21:21:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37733 is added to blk_1073741830_1006 (size=10311) 2024-12-03T21:21:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33681 is added to blk_1073741830_1006 (size=10311) 2024-12-03T21:21:22,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,45695,1733260691542/101545f66cbd%2C45695%2C1733260691542.meta.1733260692594.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:22,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36575/user/jenkins/test-data/ce32edeb-b3fb-bb2a-f927-faf2b389ea31/WALs/101545f66cbd,39741,1733260692808/101545f66cbd%2C39741%2C1733260692808.1733260693042 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T21:21:23,208 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T21:21:23,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,242 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T21:21:23,259 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T21:21:23,259 INFO [M:0;101545f66cbd:46879 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T21:21:23,259 INFO [M:0;101545f66cbd:46879 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46879 2024-12-03T21:21:23,259 INFO [M:0;101545f66cbd:46879 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T21:21:23,261 INFO [regionserver/101545f66cbd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T21:21:23,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:23,367 INFO [M:0;101545f66cbd:46879 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T21:21:23,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x1019e5bdd240000, quorum=127.0.0.1:54429, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T21:21:23,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fbadfdb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:23,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b64ea29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:23,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:23,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a40f8b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:23,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4612fdce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:23,374 WARN [BP-1690931866-172.17.0.2-1733260878533 heartbeating to localhost/127.0.0.1:38593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:21:23,374 WARN [BP-1690931866-172.17.0.2-1733260878533 heartbeating to localhost/127.0.0.1:38593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1690931866-172.17.0.2-1733260878533 (Datanode Uuid 1d414bad-d6d6-4dab-b660-c9bdebb99c40) service to localhost/127.0.0.1:38593 2024-12-03T21:21:23,374 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:21:23,374 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:21:23,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data3/current/BP-1690931866-172.17.0.2-1733260878533 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:23,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data4/current/BP-1690931866-172.17.0.2-1733260878533 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:23,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:21:23,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f07cd2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T21:21:23,377 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cdbdfd7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:23,377 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:23,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3353f5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:23,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58de1e11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:23,378 WARN [BP-1690931866-172.17.0.2-1733260878533 heartbeating to localhost/127.0.0.1:38593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T21:21:23,378 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T21:21:23,378 WARN [BP-1690931866-172.17.0.2-1733260878533 heartbeating to localhost/127.0.0.1:38593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1690931866-172.17.0.2-1733260878533 (Datanode Uuid 5228981b-8f53-4408-82fc-b3e94a1600a9) service to localhost/127.0.0.1:38593 2024-12-03T21:21:23,378 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T21:21:23,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data1/current/BP-1690931866-172.17.0.2-1733260878533 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:23,379 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T21:21:23,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/cluster_d9743bd1-a967-7e38-1204-d3b260fb7927/data/data2/current/BP-1690931866-172.17.0.2-1733260878533 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T21:21:23,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52e2305b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T21:21:23,385 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d4c84e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T21:21:23,385 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T21:21:23,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fee4cec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T21:21:23,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48d6dbcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18946b55-4146-ec68-c2cd-d5600a3e5458/hadoop.log.dir/,STOPPED} 2024-12-03T21:21:23,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T21:21:23,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T21:21:23,414 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 230) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38593
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:38593
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38593
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38593 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:38593
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38593
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38593 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38593 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
 - Thread LEAK? -, OpenFileDescriptor=532 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 269) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4548 (was 4836)